2024-11-24 03:45:21,187 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-24 03:45:21,203 main DEBUG Took 0.013522 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-24 03:45:21,203 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-24 03:45:21,204 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-24 03:45:21,205 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-24 03:45:21,206 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,213 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-24 03:45:21,228 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,230 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,231 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,231 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,232 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,233 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,234 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,235 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,235 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,236 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,237 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,237 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,238 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,238 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,239 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,240 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,240 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,241 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,241 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,242 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,242 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,243 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,244 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,244 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 03:45:21,245 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,245 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-24 03:45:21,248 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 03:45:21,250 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-24 03:45:21,253 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-24 03:45:21,254 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-24 03:45:21,256 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-24 03:45:21,257 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-24 03:45:21,268 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-24 03:45:21,272 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-24 03:45:21,274 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-24 03:45:21,274 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-24 03:45:21,275 main DEBUG createAppenders(={Console})
2024-11-24 03:45:21,276 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-11-24 03:45:21,276 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-24 03:45:21,276 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-11-24 03:45:21,277 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-24 03:45:21,278 main DEBUG OutputStream closed
2024-11-24 03:45:21,278 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-24 03:45:21,278 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-24 03:45:21,279 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-11-24 03:45:21,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-24 03:45:21,378 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-24 03:45:21,380 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-24 03:45:21,381 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-24 03:45:21,382 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-24 03:45:21,382 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-24 03:45:21,383 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-24 03:45:21,383 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-24 03:45:21,384 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-24 03:45:21,384 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-24 03:45:21,384 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-24 03:45:21,385 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-24 03:45:21,385 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-24 03:45:21,386 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-24 03:45:21,386 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-24 03:45:21,386 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-24 03:45:21,387 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-24 03:45:21,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-24 03:45:21,390 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-24 03:45:21,391 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-11-24 03:45:21,391 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-24 03:45:21,392 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-11-24T03:45:21,683 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3
2024-11-24 03:45:21,686 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-24 03:45:21,687 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-24T03:45:21,696 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-24T03:45:21,751 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=320, ProcessCount=11, AvailableMemoryMB=8281
2024-11-24T03:45:21,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-24T03:45:21,778 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117, deleteOnExit=true
2024-11-24T03:45:21,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-24T03:45:21,780 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/test.cache.data in system properties and HBase conf
2024-11-24T03:45:21,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.tmp.dir in system properties and HBase conf
2024-11-24T03:45:21,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.log.dir in system properties and HBase conf
2024-11-24T03:45:21,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-24T03:45:21,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-24T03:45:21,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-24T03:45:21,890 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-24T03:45:22,008 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-24T03:45:22,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-24T03:45:22,014 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-24T03:45:22,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-24T03:45:22,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-24T03:45:22,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-24T03:45:22,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-24T03:45:22,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-24T03:45:22,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-24T03:45:22,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-24T03:45:22,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/nfs.dump.dir in system properties and HBase conf
2024-11-24T03:45:22,021 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/java.io.tmpdir in system properties and HBase conf
2024-11-24T03:45:22,021 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-24T03:45:22,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-24T03:45:22,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-24T03:45:22,570 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-24T03:45:23,086 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-24T03:45:23,205 INFO [Time-limited test {}] log.Log(170): Logging initialized @2938ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-24T03:45:23,309 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T03:45:23,413 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T03:45:23,447 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T03:45:23,447 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T03:45:23,449 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-24T03:45:23,467 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T03:45:23,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.log.dir/,AVAILABLE}
2024-11-24T03:45:23,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T03:45:23,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de997b9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/java.io.tmpdir/jetty-localhost-35817-hadoop-hdfs-3_4_1-tests_jar-_-any-10206886813992962286/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-24T03:45:23,705 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:35817}
2024-11-24T03:45:23,706 INFO [Time-limited test {}] server.Server(415): Started @3441ms
2024-11-24T03:45:23,736 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-24T03:45:24,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T03:45:24,359 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T03:45:24,361 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T03:45:24,361 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T03:45:24,362 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-24T03:45:24,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.log.dir/,AVAILABLE}
2024-11-24T03:45:24,364 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T03:45:24,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f93babe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/java.io.tmpdir/jetty-localhost-38373-hadoop-hdfs-3_4_1-tests_jar-_-any-2311042558527754775/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T03:45:24,475 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:38373}
2024-11-24T03:45:24,476 INFO [Time-limited test {}] server.Server(415): Started @4211ms
2024-11-24T03:45:24,540 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-24T03:45:24,684 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T03:45:24,691 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T03:45:24,692 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T03:45:24,692 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T03:45:24,692 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-24T03:45:24,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.log.dir/,AVAILABLE}
2024-11-24T03:45:24,694 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T03:45:24,817 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c963ecd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/java.io.tmpdir/jetty-localhost-38103-hadoop-hdfs-3_4_1-tests_jar-_-any-14624013827459925099/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T03:45:24,818 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:38103}
2024-11-24T03:45:24,818 INFO [Time-limited test {}] server.Server(415): Started @4553ms
2024-11-24T03:45:24,820 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-24T03:45:25,758 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/data/data4/current/BP-169950544-172.17.0.2-1732419922671/current, will proceed with Du for space computation calculation,
2024-11-24T03:45:25,758 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/data/data3/current/BP-169950544-172.17.0.2-1732419922671/current, will proceed with Du for space computation calculation,
2024-11-24T03:45:25,758 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/data/data1/current/BP-169950544-172.17.0.2-1732419922671/current, will proceed with Du for space computation calculation,
2024-11-24T03:45:25,763 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/data/data2/current/BP-169950544-172.17.0.2-1732419922671/current, will proceed with Du for space computation calculation,
2024-11-24T03:45:25,822 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-24T03:45:25,823 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-24T03:45:25,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x943f6574df5e65ce with lease ID 0x161d383047dd110c: Processing first storage report for DS-c57555de-8cce-41f7-bfae-b61a6e924ace from datanode DatanodeRegistration(127.0.0.1:35221, datanodeUuid=14914e37-994b-4bdc-a710-4f8bfbc895cd, infoPort=41273, infoSecurePort=0, ipcPort=40175, storageInfo=lv=-57;cid=testClusterID;nsid=1105658902;c=1732419922671)
2024-11-24T03:45:25,900 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x943f6574df5e65ce with lease ID 0x161d383047dd110c: from storage DS-c57555de-8cce-41f7-bfae-b61a6e924ace node DatanodeRegistration(127.0.0.1:35221, datanodeUuid=14914e37-994b-4bdc-a710-4f8bfbc895cd, infoPort=41273, infoSecurePort=0, ipcPort=40175, storageInfo=lv=-57;cid=testClusterID;nsid=1105658902;c=1732419922671), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-24T03:45:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf21166086b3d0a39 with lease ID 0x161d383047dd110d: Processing first storage report for DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e from datanode DatanodeRegistration(127.0.0.1:38065, datanodeUuid=aa7021d6-c27d-4877-94f9-b6e1408026f1, infoPort=37883, infoSecurePort=0, ipcPort=39645, storageInfo=lv=-57;cid=testClusterID;nsid=1105658902;c=1732419922671)
2024-11-24T03:45:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf21166086b3d0a39 with lease ID 0x161d383047dd110d: from storage DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e node DatanodeRegistration(127.0.0.1:38065, datanodeUuid=aa7021d6-c27d-4877-94f9-b6e1408026f1, infoPort=37883, infoSecurePort=0, ipcPort=39645, storageInfo=lv=-57;cid=testClusterID;nsid=1105658902;c=1732419922671), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T03:45:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x943f6574df5e65ce with lease ID 0x161d383047dd110c: Processing first storage report for DS-33531f15-e873-43c6-846f-13ee12e18b6a from datanode DatanodeRegistration(127.0.0.1:35221, datanodeUuid=14914e37-994b-4bdc-a710-4f8bfbc895cd, infoPort=41273, infoSecurePort=0, ipcPort=40175, storageInfo=lv=-57;cid=testClusterID;nsid=1105658902;c=1732419922671)
2024-11-24T03:45:25,902 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x943f6574df5e65ce with lease ID 0x161d383047dd110c: from storage DS-33531f15-e873-43c6-846f-13ee12e18b6a node DatanodeRegistration(127.0.0.1:35221, datanodeUuid=14914e37-994b-4bdc-a710-4f8bfbc895cd, infoPort=41273, infoSecurePort=0, ipcPort=40175, storageInfo=lv=-57;cid=testClusterID;nsid=1105658902;c=1732419922671), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-24T03:45:25,902 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf21166086b3d0a39 with lease ID 0x161d383047dd110d: Processing first storage report for DS-a096bd77-3c8b-4288-a625-22c3696a1187 from datanode DatanodeRegistration(127.0.0.1:38065, datanodeUuid=aa7021d6-c27d-4877-94f9-b6e1408026f1, infoPort=37883, infoSecurePort=0, ipcPort=39645, storageInfo=lv=-57;cid=testClusterID;nsid=1105658902;c=1732419922671)
2024-11-24T03:45:25,902 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf21166086b3d0a39 with lease ID 0x161d383047dd110d: from storage DS-a096bd77-3c8b-4288-a625-22c3696a1187 node DatanodeRegistration(127.0.0.1:38065, datanodeUuid=aa7021d6-c27d-4877-94f9-b6e1408026f1, infoPort=37883, infoSecurePort=0, ipcPort=39645, storageInfo=lv=-57;cid=testClusterID;nsid=1105658902;c=1732419922671), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T03:45:25,959 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3
2024-11-24T03:45:26,074 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/zookeeper_0, clientPort=50335, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-24T03:45:26,086 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50335
2024-11-24T03:45:26,097 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:45:26,100 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:45:26,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741825_1001 (size=7)
2024-11-24T03:45:26,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741825_1001 (size=7)
2024-11-24T03:45:26,756 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070 with version=8
2024-11-24T03:45:26,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/hbase-staging
2024-11-24T03:45:26,877 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-24T03:45:27,182 INFO [Time-limited test {}] client.ConnectionUtils(128): master/71d8d2d6408d:0 server-side Connection retries=45
2024-11-24T03:45:27,191 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T03:45:27,191 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-24T03:45:27,196 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-24T03:45:27,197 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T03:45:27,197 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-24T03:45:27,349 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-24T03:45:27,410 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-24T03:45:27,418 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-24T03:45:27,423 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-24T03:45:27,455 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20401 (auto-detected)
2024-11-24T03:45:27,456 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-24T03:45:27,481 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43787
2024-11-24T03:45:27,509 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43787 connecting to ZooKeeper ensemble=127.0.0.1:50335
2024-11-24T03:45:27,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:437870x0, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-24T03:45:27,641 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43787-0x1016c3bcae90000 connected
2024-11-24T03:45:27,730 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:45:27,732 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:45:27,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T03:45:27,746 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070, hbase.cluster.distributed=false
2024-11-24T03:45:27,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T03:45:27,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43787
2024-11-24T03:45:27,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43787
2024-11-24T03:45:27,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43787
2024-11-24T03:45:27,779 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43787
2024-11-24T03:45:27,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43787
2024-11-24T03:45:27,910 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/71d8d2d6408d:0 server-side Connection retries=45
2024-11-24T03:45:27,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T03:45:27,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-24T03:45:27,913 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-24T03:45:27,913 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T03:45:27,913 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-24T03:45:27,917 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-24T03:45:27,920 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-24T03:45:27,921 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39103
2024-11-24T03:45:27,923 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39103 connecting to ZooKeeper ensemble=127.0.0.1:50335
2024-11-24T03:45:27,924 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:45:27,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:45:27,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391030x0, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-24T03:45:27,963 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:391030x0, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T03:45:27,963 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39103-0x1016c3bcae90001 connected
2024-11-24T03:45:27,967 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-24T03:45:27,974 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-24T03:45:27,976 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-24T03:45:27,981 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T03:45:27,982 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39103
2024-11-24T03:45:27,987 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39103
2024-11-24T03:45:27,987 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39103
2024-11-24T03:45:27,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39103
2024-11-24T03:45:27,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39103
2024-11-24T03:45:28,001 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;71d8d2d6408d:43787
2024-11-24T03:45:28,001 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/71d8d2d6408d,43787,1732419926986
2024-11-24T03:45:28,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T03:45:28,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T03:45:28,014 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/71d8d2d6408d,43787,1732419926986
2024-11-24T03:45:28,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-24T03:45:28,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T03:45:28,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T03:45:28,047 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-24T03:45:28,048 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/71d8d2d6408d,43787,1732419926986 from backup master directory
2024-11-24T03:45:28,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T03:45:28,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/71d8d2d6408d,43787,1732419926986
2024-11-24T03:45:28,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T03:45:28,060 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-24T03:45:28,060 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=71d8d2d6408d,43787,1732419926986
2024-11-24T03:45:28,062 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-24T03:45:28,065 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-24T03:45:28,135 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/hbase.id] with ID: 66cc5583-df64-4269-83ef-db65d3a3b4bf
2024-11-24T03:45:28,135 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/.tmp/hbase.id
2024-11-24T03:45:28,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741826_1002 (size=42)
2024-11-24T03:45:28,164 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/.tmp/hbase.id]:[hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/hbase.id]
2024-11-24T03:45:28,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741826_1002 (size=42)
2024-11-24T03:45:28,224 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:45:28,231 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-24T03:45:28,255 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 21ms.
2024-11-24T03:45:28,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T03:45:28,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T03:45:28,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741827_1003 (size=196)
2024-11-24T03:45:28,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741827_1003 (size=196)
2024-11-24T03:45:28,325 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-24T03:45:28,328 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-24T03:45:28,335 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-24T03:45:28,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741828_1004 (size=1189)
2024-11-24T03:45:28,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741828_1004 (size=1189)
2024-11-24T03:45:28,789 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store
2024-11-24T03:45:28,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741829_1005 (size=34)
2024-11-24T03:45:28,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741829_1005 (size=34)
2024-11-24T03:45:28,816 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-24T03:45:28,821 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-24T03:45:28,822 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-24T03:45:28,823 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-24T03:45:28,823 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-24T03:45:28,825 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-24T03:45:28,825 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-24T03:45:28,826 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-24T03:45:28,827 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732419928822Disabling compacts and flushes for region at 1732419928822Disabling writes for close at 1732419928825 (+3 ms)Writing region close event to WAL at 1732419928825Closed at 1732419928825 2024-11-24T03:45:28,830 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/.initializing 2024-11-24T03:45:28,830 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/WALs/71d8d2d6408d,43787,1732419926986 2024-11-24T03:45:28,854 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C43787%2C1732419926986, suffix=, logDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/WALs/71d8d2d6408d,43787,1732419926986, archiveDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/oldWALs, maxLogs=10 2024-11-24T03:45:28,863 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C43787%2C1732419926986.1732419928859 2024-11-24T03:45:28,881 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/WALs/71d8d2d6408d,43787,1732419926986/71d8d2d6408d%2C43787%2C1732419926986.1732419928859 2024-11-24T03:45:28,892 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37883:37883),(127.0.0.1/127.0.0.1:41273:41273)] 2024-11-24T03:45:28,895 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:45:28,896 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:45:28,899 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,900 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,934 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T03:45:28,962 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:28,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:45:28,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T03:45:28,970 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:28,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:45:28,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T03:45:28,975 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:28,977 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:45:28,977 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,980 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T03:45:28,980 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:28,982 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:45:28,983 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,987 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,989 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,995 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:28,996 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:29,000 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T03:45:29,004 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:45:29,009 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:45:29,011 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697235, jitterRate=-0.11342014372348785}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T03:45:29,018 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732419928912Initializing all the Stores at 1732419928914 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732419928914Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732419928915 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732419928915Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732419928915Cleaning up temporary data from old regions at 1732419928996 (+81 ms)Region opened successfully at 1732419929018 (+22 ms) 2024-11-24T03:45:29,019 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T03:45:29,047 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f998dc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:45:29,074 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T03:45:29,083 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T03:45:29,083 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T03:45:29,086 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T03:45:29,087 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-24T03:45:29,092 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-24T03:45:29,092 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T03:45:29,116 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T03:45:29,127 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T03:45:29,175 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T03:45:29,178 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T03:45:29,180 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T03:45:29,191 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T03:45:29,194 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T03:45:29,199 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T03:45:29,211 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T03:45:29,213 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T03:45:29,219 DEBUG 
[master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T03:45:29,239 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T03:45:29,250 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T03:45:29,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:45:29,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:45:29,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:45:29,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:45:29,265 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=71d8d2d6408d,43787,1732419926986, sessionid=0x1016c3bcae90000, setting cluster-up flag (Was=false) 2024-11-24T03:45:29,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:45:29,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:45:29,320 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T03:45:29,322 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,43787,1732419926986 2024-11-24T03:45:29,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:45:29,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:45:29,370 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T03:45:29,371 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,43787,1732419926986 2024-11-24T03:45:29,378 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T03:45:29,393 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(746): ClusterId : 66cc5583-df64-4269-83ef-db65d3a3b4bf 2024-11-24T03:45:29,396 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T03:45:29,412 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T03:45:29,412 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T03:45:29,421 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T03:45:29,422 DEBUG [RS:0;71d8d2d6408d:39103 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a176ad3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:45:29,441 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;71d8d2d6408d:39103 2024-11-24T03:45:29,445 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T03:45:29,445 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T03:45:29,445 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T03:45:29,448 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(2659): reportForDuty to master=71d8d2d6408d,43787,1732419926986 with port=39103, startcode=1732419927868 2024-11-24T03:45:29,462 DEBUG [RS:0;71d8d2d6408d:39103 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T03:45:29,466 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T03:45:29,478 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T03:45:29,486 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
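The per-family flush lower bound reported by FlushLargeStoresPolicy above (32.0 M, flushSizeLowerBound=33554432) follows directly from numbers already in this log: the master region flushSize=134217728 divided by the four column families of master:store (info, proc, rs, state). A small arithmetic check:

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Both inputs are taken from the log lines above.
        long memStoreFlushSize = 134_217_728L; // flushSize reported by MasterRegionFlusherAndCompactor
        int columnFamilies = 4;                // info, proc, rs, state
        System.out.println(memStoreFlushSize / columnFamilies); // 33554432, i.e. the 32.0 M lower bound
      }
    }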
2024-11-24T03:45:29,493 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 71d8d2d6408d,43787,1732419926986 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T03:45:29,501 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:45:29,501 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:45:29,502 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:45:29,502 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:45:29,502 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/71d8d2d6408d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T03:45:29,502 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,502 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:45:29,503 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,508 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732419959508 2024-11-24T03:45:29,509 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:45:29,509 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T03:45:29,510 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T03:45:29,511 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T03:45:29,515 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T03:45:29,516 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T03:45:29,516 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T03:45:29,516 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T03:45:29,516 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:29,517 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T03:45:29,519 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
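The hbase:meta descriptor printed above lists each column family's attributes. As a rough client-side illustration (not how FSTableDescriptors itself builds it), the 'info' family corresponds to roughly this ColumnFamilyDescriptorBuilder usage, assuming the HBase 2.x+ client API:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilySketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the hbase:meta descriptor above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                  // VERSIONS => '3'
            .setInMemory(true)                                  // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)               // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8192)                                 // BLOCKSIZE => '8192 B (8KB)'
            .build();
        System.out.println(info);
      }
    }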
2024-11-24T03:45:29,533 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T03:45:29,534 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T03:45:29,535 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T03:45:29,539 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T03:45:29,540 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T03:45:29,544 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48895, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T03:45:29,546 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732419929544,5,FailOnTimeoutGroup] 2024-11-24T03:45:29,551 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732419929547,5,FailOnTimeoutGroup] 2024-11-24T03:45:29,551 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,551 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T03:45:29,553 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,553 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
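The HMaster line above names the key it consulted before disabling the high-storeFileRefCount recovery chore. A minimal sketch of enabling it, again via the standard Configuration API; 256 is only an illustrative threshold:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key quoted verbatim from the HMaster(1741) line above; any value > 0 enables the feature.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256); // example threshold only
      }
    }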
2024-11-24T03:45:29,555 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43787 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:29,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:45:29,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:45:29,558 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43787 {}] master.ServerManager(517): Registering regionserver=71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:29,576 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070 2024-11-24T03:45:29,576 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38669 2024-11-24T03:45:29,576 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T03:45:29,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:45:29,603 DEBUG [RS:0;71d8d2d6408d:39103 {}] zookeeper.ZKUtil(111): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:29,604 WARN [RS:0;71d8d2d6408d:39103 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T03:45:29,604 INFO [RS:0;71d8d2d6408d:39103 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:45:29,604 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:29,606 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [71d8d2d6408d,39103,1732419927868] 2024-11-24T03:45:29,629 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T03:45:29,645 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T03:45:29,650 INFO [RS:0;71d8d2d6408d:39103 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T03:45:29,650 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
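The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. The ratio 836/880 is 0.95, which matches the usual default lower-limit fraction; the 0.95 factor here is inferred from those two numbers, not stated by the log itself:

    public class MemStoreLowMarkSketch {
      public static void main(String[] args) {
        long globalLimitMb = 880;          // from the MemStoreFlusher line above
        double lowerLimitFraction = 0.95;  // assumption: inferred from 836/880, the common default
        System.out.println((long) (globalLimitMb * lowerLimitFraction)); // 836
      }
    }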
2024-11-24T03:45:29,651 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T03:45:29,656 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T03:45:29,658 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,658 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,658 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,658 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,658 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,659 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,659 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:45:29,659 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,659 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,659 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,659 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,659 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,659 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:45:29,660 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:45:29,660 DEBUG [RS:0;71d8d2d6408d:39103 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:45:29,661 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
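The executor-service lines above all follow the same corePoolSize/maxPoolSize pattern, and the dispatcher line earlier notes allowCoreThreadTimeOut=true. A plain JDK sketch of that pattern (this is not HBase's own ExecutorService class, just the underlying ThreadPoolExecutor idea):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorSketch {
      public static void main(String[] args) {
        // A pool with corePoolSize=1, maxPoolSize=1, like the RS_OPEN_REGION executor above.
        ThreadPoolExecutor rsOpenRegion = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        rsOpenRegion.allowCoreThreadTimeOut(true); // idle core threads may exit, as in the dispatcher line
        rsOpenRegion.shutdown();
      }
    }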
2024-11-24T03:45:29,661 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,661 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,661 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,661 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,661 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,39103,1732419927868-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:45:29,679 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T03:45:29,681 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,39103,1732419927868-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,681 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,681 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.Replication(171): 71d8d2d6408d,39103,1732419927868 started 2024-11-24T03:45:29,702 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:29,702 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1482): Serving as 71d8d2d6408d,39103,1732419927868, RpcServer on 71d8d2d6408d/172.17.0.2:39103, sessionid=0x1016c3bcae90001 2024-11-24T03:45:29,703 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T03:45:29,703 DEBUG [RS:0;71d8d2d6408d:39103 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:29,704 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,39103,1732419927868' 2024-11-24T03:45:29,704 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T03:45:29,705 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T03:45:29,706 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T03:45:29,706 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T03:45:29,706 DEBUG [RS:0;71d8d2d6408d:39103 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:29,706 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,39103,1732419927868' 2024-11-24T03:45:29,706 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T03:45:29,707 DEBUG 
[RS:0;71d8d2d6408d:39103 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T03:45:29,708 DEBUG [RS:0;71d8d2d6408d:39103 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T03:45:29,708 INFO [RS:0;71d8d2d6408d:39103 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T03:45:29,708 INFO [RS:0;71d8d2d6408d:39103 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T03:45:29,821 INFO [RS:0;71d8d2d6408d:39103 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C39103%2C1732419927868, suffix=, logDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868, archiveDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs, maxLogs=32 2024-11-24T03:45:29,823 INFO [RS:0;71d8d2d6408d:39103 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.1732419929823 2024-11-24T03:45:29,832 INFO [RS:0;71d8d2d6408d:39103 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419929823 2024-11-24T03:45:29,833 DEBUG [RS:0;71d8d2d6408d:39103 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37883:37883),(127.0.0.1/127.0.0.1:41273:41273)] 2024-11-24T03:45:29,961 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T03:45:29,962 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070 2024-11-24T03:45:29,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741833_1009 (size=32) 2024-11-24T03:45:29,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741833_1009 (size=32) 2024-11-24T03:45:30,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:45:30,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:45:30,387 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:45:30,388 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:30,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:45:30,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:45:30,391 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:45:30,392 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:30,393 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:45:30,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:45:30,395 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:45:30,395 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:30,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:45:30,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:45:30,399 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:45:30,399 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:30,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:45:30,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:45:30,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740 2024-11-24T03:45:30,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740 2024-11-24T03:45:30,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:45:30,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:45:30,406 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T03:45:30,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:45:30,412 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:45:30,413 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876287, jitterRate=0.11425738036632538}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:45:30,416 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732419930378Initializing all the Stores at 1732419930382 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732419930382Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732419930382Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732419930382Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732419930383 (+1 ms)Cleaning up temporary data from old regions at 1732419930405 (+22 ms)Region opened successfully at 1732419930416 (+11 ms) 2024-11-24T03:45:30,416 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:45:30,416 INFO [PEWorker-1 {}] 
regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:45:30,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:45:30,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:45:30,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:45:30,418 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:45:30,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732419930416Disabling compacts and flushes for region at 1732419930416Disabling writes for close at 1732419930417 (+1 ms)Writing region close event to WAL at 1732419930418 (+1 ms)Closed at 1732419930418 2024-11-24T03:45:30,421 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:45:30,421 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T03:45:30,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T03:45:30,434 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:45:30,437 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T03:45:30,593 DEBUG [71d8d2d6408d:43787 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T03:45:30,603 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:30,610 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,39103,1732419927868, state=OPENING 2024-11-24T03:45:30,669 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T03:45:30,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:45:30,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:45:30,680 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:45:30,680 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 
2024-11-24T03:45:30,683 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:45:30,687 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,39103,1732419927868}] 2024-11-24T03:45:30,866 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T03:45:30,869 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51059, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T03:45:30,881 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T03:45:30,882 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:45:30,885 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C39103%2C1732419927868.meta, suffix=.meta, logDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868, archiveDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs, maxLogs=32 2024-11-24T03:45:30,888 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.meta.1732419930887.meta 2024-11-24T03:45:30,895 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.meta.1732419930887.meta 2024-11-24T03:45:30,899 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37883:37883),(127.0.0.1/127.0.0.1:41273:41273)] 2024-11-24T03:45:30,900 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:45:30,901 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T03:45:30,904 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T03:45:30,908 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta 
successfully. 2024-11-24T03:45:30,911 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T03:45:30,912 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:45:30,912 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T03:45:30,912 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T03:45:30,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:45:30,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:45:30,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:30,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:45:30,918 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:45:30,919 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:45:30,919 DEBUG [StoreOpener-1588230740-1 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:30,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:45:30,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:45:30,922 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:45:30,922 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:30,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:45:30,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:45:30,924 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:45:30,924 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:30,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
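The CompactionConfiguration lines above dump the effective compaction settings for each column family of hbase:meta (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, minCompactSize 128 MB, ExploringCompactionPolicy). To the best of my knowledge these numbers correspond to the standard hbase.hstore.compaction.* properties; the short sketch below only illustrates where such values would typically be set and is an assumption, not code taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed mapping to the values printed by CompactionConfiguration(183) above.
    conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio
    System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
  }
}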
2024-11-24T03:45:30,925 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:45:30,927 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740 2024-11-24T03:45:30,929 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740 2024-11-24T03:45:30,931 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:45:30,931 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:45:30,932 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T03:45:30,934 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:45:30,936 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811001, jitterRate=0.031241148710250854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:45:30,936 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T03:45:30,937 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732419930912Writing region info on filesystem at 1732419930913 (+1 ms)Initializing all the Stores at 1732419930914 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732419930914Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732419930915 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732419930915Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732419930915Cleaning up temporary data from old regions at 1732419930931 (+16 ms)Running coprocessor post-open hooks at 1732419930936 (+5 ms)Region opened successfully at 1732419930937 (+1 ms) 2024-11-24T03:45:30,943 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732419930859 2024-11-24T03:45:30,953 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T03:45:30,953 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T03:45:30,955 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:30,957 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,39103,1732419927868, state=OPEN 2024-11-24T03:45:31,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:45:31,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:45:31,020 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:45:31,020 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:45:31,020 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:31,026 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T03:45:31,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,39103,1732419927868 in 334 msec 2024-11-24T03:45:31,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T03:45:31,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 601 msec 2024-11-24T03:45:31,036 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:45:31,036 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T03:45:31,057 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:45:31,058 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,39103,1732419927868, seqNum=-1] 2024-11-24T03:45:31,076 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:45:31,077 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54243, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:45:31,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6820 sec 2024-11-24T03:45:31,097 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732419931096, completionTime=-1 2024-11-24T03:45:31,099 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T03:45:31,099 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T03:45:31,127 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T03:45:31,127 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732419991127 2024-11-24T03:45:31,127 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732420051127 2024-11-24T03:45:31,127 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-11-24T03:45:31,130 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,43787,1732419926986-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:31,130 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,43787,1732419926986-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:31,131 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,43787,1732419926986-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:31,132 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-71d8d2d6408d:43787, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T03:45:31,132 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:31,133 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T03:45:31,139 DEBUG [master/71d8d2d6408d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T03:45:31,159 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.099sec 2024-11-24T03:45:31,160 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T03:45:31,161 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T03:45:31,161 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T03:45:31,162 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T03:45:31,162 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T03:45:31,163 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,43787,1732419926986-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:45:31,163 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,43787,1732419926986-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T03:45:31,171 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T03:45:31,172 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T03:45:31,172 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,43787,1732419926986-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T03:45:31,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2961789e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:45:31,204 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-24T03:45:31,204 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-24T03:45:31,207 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 71d8d2d6408d,43787,-1 for getting cluster id 2024-11-24T03:45:31,209 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T03:45:31,216 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '66cc5583-df64-4269-83ef-db65d3a3b4bf' 2024-11-24T03:45:31,218 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T03:45:31,219 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "66cc5583-df64-4269-83ef-db65d3a3b4bf" 2024-11-24T03:45:31,219 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@134cb0cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:45:31,219 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [71d8d2d6408d,43787,-1] 2024-11-24T03:45:31,221 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T03:45:31,223 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:45:31,224 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33784, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T03:45:31,227 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9a039b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:45:31,227 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:45:31,234 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,39103,1732419927868, seqNum=-1] 2024-11-24T03:45:31,235 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:45:31,237 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46030, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:45:31,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=71d8d2d6408d,43787,1732419926986 2024-11-24T03:45:31,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:45:31,263 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T03:45:31,267 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T03:45:31,272 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 71d8d2d6408d,43787,1732419926986 2024-11-24T03:45:31,274 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7b956548 2024-11-24T03:45:31,275 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T03:45:31,277 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33796, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T03:45:31,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43787 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T03:45:31,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43787 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
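The two TableDescriptorChecker warnings above are expected in this run: the test works with deliberately tiny limits (hbase.hregion.max.filesize=786432, hbase.hregion.memstore.flush.size=8192) so that splits and flushes happen quickly. A minimal sketch of how such limits can be applied to a Configuration follows; the property names and values come from the warnings themselves, while the wrapper class and the assumption that the harness sets them this way are mine.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyRegionLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // 768 KB max store file size -> triggers the MAX_FILESIZE warning seen above.
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    // 8 KB memstore flush size -> triggers the MEMSTORE_FLUSHSIZE warning and very frequent flushes.
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
    System.out.println("flush.size=" + conf.getLong("hbase.hregion.memstore.flush.size", -1));
  }
}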
2024-11-24T03:45:31,283 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43787 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:45:31,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43787 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-24T03:45:31,292 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T03:45:31,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43787 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-24T03:45:31,294 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:31,296 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T03:45:31,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43787 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T03:45:31,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741835_1011 (size=389) 2024-11-24T03:45:31,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741835_1011 (size=389) 2024-11-24T03:45:31,346 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0001faa9bb9158a1735ce4f5ec2ad86a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070 2024-11-24T03:45:31,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741836_1012 (size=72) 2024-11-24T03:45:31,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741836_1012 (size=72) 2024-11-24T03:45:31,359 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:45:31,359 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 0001faa9bb9158a1735ce4f5ec2ad86a, disabling compactions & flushes 2024-11-24T03:45:31,359 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:45:31,359 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:45:31,360 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. after waiting 0 ms 2024-11-24T03:45:31,360 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:45:31,360 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:45:31,360 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0001faa9bb9158a1735ce4f5ec2ad86a: Waiting for close lock at 1732419931359Disabling compacts and flushes for region at 1732419931359Disabling writes for close at 1732419931360 (+1 ms)Writing region close event to WAL at 1732419931360Closed at 1732419931360 2024-11-24T03:45:31,362 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T03:45:31,367 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732419931362"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732419931362"}]},"ts":"1732419931362"} 2024-11-24T03:45:31,373 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
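The create request logged at 03:45:31,283 carries a descriptor with a single 'info' family, one version and a ROW bloom filter. A hypothetical client-side equivalent using the public Admin API is sketched below; it is not the code path the test itself uses (the test apparently goes through HBaseTestingUtil, per the surrounding log), and the connection setup is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .build())
          .build());
    }
  }
}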
2024-11-24T03:45:31,375 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T03:45:31,378 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732419931376"}]},"ts":"1732419931376"} 2024-11-24T03:45:31,383 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-24T03:45:31,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0001faa9bb9158a1735ce4f5ec2ad86a, ASSIGN}] 2024-11-24T03:45:31,388 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0001faa9bb9158a1735ce4f5ec2ad86a, ASSIGN 2024-11-24T03:45:31,391 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0001faa9bb9158a1735ce4f5ec2ad86a, ASSIGN; state=OFFLINE, location=71d8d2d6408d,39103,1732419927868; forceNewPlan=false, retain=false 2024-11-24T03:45:31,543 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0001faa9bb9158a1735ce4f5ec2ad86a, regionState=OPENING, regionLocation=71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:31,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0001faa9bb9158a1735ce4f5ec2ad86a, ASSIGN because future has completed 2024-11-24T03:45:31,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0001faa9bb9158a1735ce4f5ec2ad86a, server=71d8d2d6408d,39103,1732419927868}] 2024-11-24T03:45:31,721 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 
2024-11-24T03:45:31,721 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0001faa9bb9158a1735ce4f5ec2ad86a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:45:31,722 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,722 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:45:31,723 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,723 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,725 INFO [StoreOpener-0001faa9bb9158a1735ce4f5ec2ad86a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,727 INFO [StoreOpener-0001faa9bb9158a1735ce4f5ec2ad86a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0001faa9bb9158a1735ce4f5ec2ad86a columnFamilyName info 2024-11-24T03:45:31,728 DEBUG [StoreOpener-0001faa9bb9158a1735ce4f5ec2ad86a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:45:31,729 INFO [StoreOpener-0001faa9bb9158a1735ce4f5ec2ad86a-1 {}] regionserver.HStore(327): Store=0001faa9bb9158a1735ce4f5ec2ad86a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:45:31,729 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,730 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,731 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,732 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,732 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,735 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,738 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:45:31,739 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0001faa9bb9158a1735ce4f5ec2ad86a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694723, jitterRate=-0.11661458015441895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T03:45:31,739 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:31,740 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0001faa9bb9158a1735ce4f5ec2ad86a: Running coprocessor pre-open hook at 1732419931723Writing region info on filesystem at 1732419931723Initializing all the Stores at 1732419931724 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732419931725 (+1 ms)Cleaning up temporary data from old regions at 1732419931732 (+7 ms)Running coprocessor post-open hooks at 1732419931739 (+7 ms)Region opened successfully at 1732419931740 (+1 ms) 2024-11-24T03:45:31,742 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a., pid=6, masterSystemTime=1732419931707 2024-11-24T03:45:31,746 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:45:31,747 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:45:31,748 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0001faa9bb9158a1735ce4f5ec2ad86a, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,39103,1732419927868 2024-11-24T03:45:31,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0001faa9bb9158a1735ce4f5ec2ad86a, server=71d8d2d6408d,39103,1732419927868 because future has completed 2024-11-24T03:45:31,754 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43787 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=71d8d2d6408d,39103,1732419927868, table=TestLogRolling-testSlowSyncLogRolling, region=0001faa9bb9158a1735ce4f5ec2ad86a. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-24T03:45:31,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T03:45:31,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0001faa9bb9158a1735ce4f5ec2ad86a, server=71d8d2d6408d,39103,1732419927868 in 202 msec 2024-11-24T03:45:31,763 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T03:45:31,763 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0001faa9bb9158a1735ce4f5ec2ad86a, ASSIGN in 374 msec 2024-11-24T03:45:31,765 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T03:45:31,765 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732419931765"}]},"ts":"1732419931765"} 2024-11-24T03:45:31,769 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-24T03:45:31,770 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T03:45:31,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 486 msec 2024-11-24T03:45:35,625 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T03:45:35,627 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-24T03:45:36,423 
WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-24T03:45:37,407 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T03:45:37,407 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T03:45:37,410 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T03:45:37,410 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T03:45:37,411 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:45:37,411 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T03:45:37,411 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T03:45:37,411 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T03:45:41,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43787 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T03:45:41,324 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-24T03:45:41,330 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-24T03:45:41,340 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-24T03:45:41,341 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 
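Once pid=4 finishes, the test scans hbase:meta for the new table and reports "Found 1 regions" together with firstRegionName. The same information is available to any client through RegionLocator; the sketch below assumes an already-open Connection to this mini-cluster and is not the HBaseTestingUtil helper itself.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListTestTableRegions {
  static void printRegions(Connection conn) throws IOException {
    TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println("Found " + locations.size() + " regions for table " + table);
      for (HRegionLocation loc : locations) {
        // Region names look like <table>,<startKey>,<timestamp>.<encodedId>. as in firstRegionName above.
        System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getHostnamePort());
      }
    }
  }
}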
2024-11-24T03:45:41,341 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.1732419941341 2024-11-24T03:45:41,350 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:45:41,350 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:45:41,350 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:45:41,350 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:45:41,350 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:45:41,351 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419929823 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419941341 2024-11-24T03:45:41,352 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37883:37883),(127.0.0.1/127.0.0.1:41273:41273)] 2024-11-24T03:45:41,352 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419929823 is not closed yet, will try archiving it next time 2024-11-24T03:45:41,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741832_1008 (size=451) 2024-11-24T03:45:41,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741832_1008 (size=451) 2024-11-24T03:45:41,360 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419929823 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs/71d8d2d6408d%2C39103%2C1732419927868.1732419929823 2024-11-24T03:45:41,364 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a., hostname=71d8d2d6408d,39103,1732419927868, seqNum=2] 2024-11-24T03:45:53,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39103 {}] regionserver.HRegion(8855): Flush requested on 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:45:53,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0001faa9bb9158a1735ce4f5ec2ad86a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:45:53,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/bd60bc6b71ae4c72a2f70e5660495447 is 1080, key is row0001/info:/1732419941367/Put/seqid=0 2024-11-24T03:45:53,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741838_1014 (size=12509) 2024-11-24T03:45:53,476 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741838_1014 (size=12509) 2024-11-24T03:45:53,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/bd60bc6b71ae4c72a2f70e5660495447 2024-11-24T03:45:53,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/bd60bc6b71ae4c72a2f70e5660495447 as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/bd60bc6b71ae4c72a2f70e5660495447 2024-11-24T03:45:53,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/bd60bc6b71ae4c72a2f70e5660495447, entries=7, sequenceid=11, filesize=12.2 K 2024-11-24T03:45:53,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0001faa9bb9158a1735ce4f5ec2ad86a in 535ms, sequenceid=11, compaction requested=false 2024-11-24T03:45:53,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0001faa9bb9158a1735ce4f5ec2ad86a: 2024-11-24T03:45:55,955 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-24T03:46:01,429 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.1732419961428 2024-11-24T03:46:01,646 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK], DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK]] 2024-11-24T03:46:01,647 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:01,647 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:01,647 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:01,647 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:01,647 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:01,647 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419941341 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419961428 2024-11-24T03:46:01,649 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:37883:37883)] 2024-11-24T03:46:01,649 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419941341 is not closed yet, will try archiving it next time 2024-11-24T03:46:01,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741837_1013 (size=12399) 2024-11-24T03:46:01,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741837_1013 (size=12399) 2024-11-24T03:46:01,853 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:04,062 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:06,267 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:08,476 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:08,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39103 {}] regionserver.HRegion(8855): Flush requested on 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:46:08,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0001faa9bb9158a1735ce4f5ec2ad86a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:46:08,682 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:08,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/f87f09268c0c42df92710c20d5969c93 is 1080, key is row0008/info:/1732419955409/Put/seqid=0 2024-11-24T03:46:08,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741840_1016 (size=12509) 2024-11-24T03:46:08,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741840_1016 (size=12509) 2024-11-24T03:46:08,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/f87f09268c0c42df92710c20d5969c93 2024-11-24T03:46:08,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/f87f09268c0c42df92710c20d5969c93 as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/f87f09268c0c42df92710c20d5969c93 2024-11-24T03:46:08,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/f87f09268c0c42df92710c20d5969c93, entries=7, sequenceid=21, filesize=12.2 K 2024-11-24T03:46:08,927 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:08,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0001faa9bb9158a1735ce4f5ec2ad86a in 
450ms, sequenceid=21, compaction requested=false 2024-11-24T03:46:08,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0001faa9bb9158a1735ce4f5ec2ad86a: 2024-11-24T03:46:08,928 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-24T03:46:08,929 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:46:08,931 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/bd60bc6b71ae4c72a2f70e5660495447 because midkey is the same as first or last row 2024-11-24T03:46:10,683 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:12,063 INFO [master/71d8d2d6408d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T03:46:12,063 INFO [master/71d8d2d6408d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T03:46:12,889 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:12,893 WARN [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:12,895 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C39103%2C1732419927868:(num 1732419961428) roll requested 2024-11-24T03:46:12,895 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.1732419972895 2024-11-24T03:46:13,103 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:13,103 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:13,103 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:13,103 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:13,104 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:13,104 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-24T03:46:13,104 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419961428 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419972895 2024-11-24T03:46:13,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741839_1015 (size=7739) 2024-11-24T03:46:13,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741839_1015 (size=7739) 2024-11-24T03:46:13,108 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37883:37883),(127.0.0.1/127.0.0.1:41273:41273)] 2024-11-24T03:46:13,108 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419961428 is not closed yet, will try archiving it next time 2024-11-24T03:46:13,108 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419941341 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs/71d8d2d6408d%2C39103%2C1732419927868.1732419941341 2024-11-24T03:46:15,093 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK], DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK]] 2024-11-24T03:46:16,723 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0001faa9bb9158a1735ce4f5ec2ad86a, had cached 0 bytes from a total of 25018 2024-11-24T03:46:17,298 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK], DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK]] 2024-11-24T03:46:19,502 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK], DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK]] 2024-11-24T03:46:21,707 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK], 
DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK]] 2024-11-24T03:46:23,708 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T03:46:23,709 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.1732419983709 2024-11-24T03:46:25,956 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T03:46:28,724 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK], DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK]] 2024-11-24T03:46:28,726 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK], DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK]] 2024-11-24T03:46:28,726 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C39103%2C1732419927868:(num 1732419983709) roll requested 2024-11-24T03:46:28,726 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:28,727 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:28,727 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:28,727 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:28,727 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:28,727 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419972895 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419983709 2024-11-24T03:46:28,728 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:37883:37883)] 2024-11-24T03:46:28,728 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419972895 is not closed yet, will try archiving it next time 2024-11-24T03:46:28,729 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.1732419988728 2024-11-24T03:46:28,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741841_1017 (size=4753) 2024-11-24T03:46:28,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741841_1017 (size=4753) 2024-11-24T03:46:33,735 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5003 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:33,735 WARN [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5003 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:33,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39103 {}] regionserver.HRegion(8855): Flush requested on 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:46:33,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0001faa9bb9158a1735ce4f5ec2ad86a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:46:33,744 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:33,744 WARN [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:35,737 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T03:46:38,740 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:38,740 WARN [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:38,740 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:38,741 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:38,741 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:38,741 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:38,741 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:38,741 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419983709 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419988728 2024-11-24T03:46:38,743 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:37883:37883)] 2024-11-24T03:46:38,743 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419983709 is not closed yet, will try archiving it next time 2024-11-24T03:46:38,744 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C39103%2C1732419927868:(num 1732419988728) roll requested 2024-11-24T03:46:38,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741842_1018 (size=1569) 2024-11-24T03:46:38,744 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.1732419998744 2024-11-24T03:46:38,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741842_1018 (size=1569) 2024-11-24T03:46:38,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/9aa4e576c92048b596a909c1f74b9725 is 1080, key is row0015/info:/1732419970480/Put/seqid=0 2024-11-24T03:46:38,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741844_1020 (size=12509) 2024-11-24T03:46:38,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741844_1020 (size=12509) 2024-11-24T03:46:38,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/9aa4e576c92048b596a909c1f74b9725 2024-11-24T03:46:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/9aa4e576c92048b596a909c1f74b9725 as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/9aa4e576c92048b596a909c1f74b9725 2024-11-24T03:46:38,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/9aa4e576c92048b596a909c1f74b9725, entries=7, sequenceid=31, filesize=12.2 K 2024-11-24T03:46:43,756 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:43,756 WARN [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:43,803 INFO [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:43,803 WARN [FSHLog-0-hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070-prefix:71d8d2d6408d,39103,1732419927868 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35221,DS-c57555de-8cce-41f7-bfae-b61a6e924ace,DISK], DatanodeInfoWithStorage[127.0.0.1:38065,DS-99dcd8d2-9c10-42f3-b8f8-542a0aecd77e,DISK]] 2024-11-24T03:46:43,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0001faa9bb9158a1735ce4f5ec2ad86a in 10067ms, sequenceid=31, compaction requested=true 2024-11-24T03:46:43,803 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0001faa9bb9158a1735ce4f5ec2ad86a: 2024-11-24T03:46:43,803 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,803 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,803 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-24T03:46:43,803 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:46:43,803 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,803 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/bd60bc6b71ae4c72a2f70e5660495447 because midkey is the same as first or last row 2024-11-24T03:46:43,804 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,804 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419988728 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419998744 2024-11-24T03:46:43,805 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:37883:37883)] 2024-11-24T03:46:43,805 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419988728 is not closed yet, will try archiving it next time 2024-11-24T03:46:43,806 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419961428 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs/71d8d2d6408d%2C39103%2C1732419927868.1732419961428 2024-11-24T03:46:43,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0001faa9bb9158a1735ce4f5ec2ad86a:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:46:43,806 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C39103%2C1732419927868:(num 1732420003806) roll requested 2024-11-24T03:46:43,806 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.1732420003806 2024-11-24T03:46:43,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741843_1019 (size=438) 2024-11-24T03:46:43,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741843_1019 (size=438) 2024-11-24T03:46:43,809 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419972895 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs/71d8d2d6408d%2C39103%2C1732419927868.1732419972895 2024-11-24T03:46:43,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:46:43,809 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:46:43,811 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419983709 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs/71d8d2d6408d%2C39103%2C1732419927868.1732419983709 2024-11-24T03:46:43,813 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419988728 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs/71d8d2d6408d%2C39103%2C1732419927868.1732419988728 2024-11-24T03:46:43,814 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-24T03:46:43,815 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.HStore(1541): 0001faa9bb9158a1735ce4f5ec2ad86a/info is initiating minor compaction (all files) 2024-11-24T03:46:43,816 INFO [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0001faa9bb9158a1735ce4f5ec2ad86a/info in TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:46:43,816 INFO [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/bd60bc6b71ae4c72a2f70e5660495447, hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/f87f09268c0c42df92710c20d5969c93, hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/9aa4e576c92048b596a909c1f74b9725] into tmpdir=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp, totalSize=36.6 K 2024-11-24T03:46:43,817 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] compactions.Compactor(225): Compacting bd60bc6b71ae4c72a2f70e5660495447, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732419941367 2024-11-24T03:46:43,818 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] compactions.Compactor(225): Compacting f87f09268c0c42df92710c20d5969c93, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732419955409 2024-11-24T03:46:43,819 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9aa4e576c92048b596a909c1f74b9725, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732419970480 2024-11-24T03:46:43,822 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,822 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,822 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,823 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,823 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,823 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419998744 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732420003806 2024-11-24T03:46:43,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741845_1021 (size=93) 2024-11-24T03:46:43,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741845_1021 (size=93) 2024-11-24T03:46:43,826 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732419998744 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs/71d8d2d6408d%2C39103%2C1732419927868.1732419998744 2024-11-24T03:46:43,832 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:37883:37883)] 2024-11-24T03:46:43,832 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C39103%2C1732419927868.1732420003832 2024-11-24T03:46:43,844 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,844 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,844 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,844 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,844 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:46:43,845 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732420003806 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/WALs/71d8d2d6408d,39103,1732419927868/71d8d2d6408d%2C39103%2C1732419927868.1732420003832 2024-11-24T03:46:43,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741846_1022 (size=1258) 2024-11-24T03:46:43,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741846_1022 (size=1258) 2024-11-24T03:46:43,849 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37883:37883),(127.0.0.1/127.0.0.1:41273:41273)] 2024-11-24T03:46:43,854 INFO [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0001faa9bb9158a1735ce4f5ec2ad86a#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:46:43,855 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/90392f80369a4cd19321c663725db6ac is 1080, key is row0001/info:/1732419941367/Put/seqid=0 2024-11-24T03:46:43,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741848_1024 (size=27710) 2024-11-24T03:46:43,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741848_1024 (size=27710) 2024-11-24T03:46:43,872 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/90392f80369a4cd19321c663725db6ac as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/90392f80369a4cd19321c663725db6ac 2024-11-24T03:46:43,888 INFO [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0001faa9bb9158a1735ce4f5ec2ad86a/info of 0001faa9bb9158a1735ce4f5ec2ad86a into 90392f80369a4cd19321c663725db6ac(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:46:43,888 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0001faa9bb9158a1735ce4f5ec2ad86a: 2024-11-24T03:46:43,889 INFO [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a., storeName=0001faa9bb9158a1735ce4f5ec2ad86a/info, priority=13, startTime=1732420003805; duration=0sec 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/90392f80369a4cd19321c663725db6ac because midkey is the same as first or last row 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/90392f80369a4cd19321c663725db6ac because midkey is the same as first or last row 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/90392f80369a4cd19321c663725db6ac because midkey is the same as first or last row 2024-11-24T03:46:43,890 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:46:43,891 DEBUG [RS:0;71d8d2d6408d:39103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0001faa9bb9158a1735ce4f5ec2ad86a:info 2024-11-24T03:46:55,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39103 {}] regionserver.HRegion(8855): Flush requested on 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:46:55,871 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0001faa9bb9158a1735ce4f5ec2ad86a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:46:55,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/5b28d59e0d2f433c9cf683f6b1591380 is 1080, key is row0022/info:/1732420003833/Put/seqid=0 2024-11-24T03:46:55,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741849_1025 (size=12509) 2024-11-24T03:46:55,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741849_1025 (size=12509) 2024-11-24T03:46:55,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/5b28d59e0d2f433c9cf683f6b1591380 2024-11-24T03:46:55,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/5b28d59e0d2f433c9cf683f6b1591380 as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/5b28d59e0d2f433c9cf683f6b1591380 2024-11-24T03:46:55,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/5b28d59e0d2f433c9cf683f6b1591380, entries=7, sequenceid=42, filesize=12.2 K 2024-11-24T03:46:55,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0001faa9bb9158a1735ce4f5ec2ad86a in 36ms, sequenceid=42, compaction requested=false 2024-11-24T03:46:55,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0001faa9bb9158a1735ce4f5ec2ad86a: 2024-11-24T03:46:55,907 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-24T03:46:55,907 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:46:55,907 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/90392f80369a4cd19321c663725db6ac because midkey is the same as first or last row 2024-11-24T03:46:55,956 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T03:47:01,723 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0001faa9bb9158a1735ce4f5ec2ad86a, had cached 0 bytes from a total of 40219 2024-11-24T03:47:03,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T03:47:03,888 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T03:47:03,888 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:47:03,895 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:03,895 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:03,896 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T03:47:03,896 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T03:47:03,896 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=698085998, stopped=false 2024-11-24T03:47:03,896 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=71d8d2d6408d,43787,1732419926986 2024-11-24T03:47:03,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:47:03,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:47:03,948 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:47:03,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:03,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:03,949 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T03:47:03,949 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:47:03,949 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:03,950 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:03,950 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:03,950 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '71d8d2d6408d,39103,1732419927868' ***** 2024-11-24T03:47:03,950 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T03:47:03,951 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T03:47:03,951 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T03:47:03,951 INFO [RS:0;71d8d2d6408d:39103 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T03:47:03,951 INFO [RS:0;71d8d2d6408d:39103 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T03:47:03,952 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(3091): Received CLOSE for 0001faa9bb9158a1735ce4f5ec2ad86a 2024-11-24T03:47:03,953 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(959): stopping server 71d8d2d6408d,39103,1732419927868 2024-11-24T03:47:03,953 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:47:03,953 INFO [RS:0;71d8d2d6408d:39103 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;71d8d2d6408d:39103. 
2024-11-24T03:47:03,953 DEBUG [RS:0;71d8d2d6408d:39103 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:47:03,954 DEBUG [RS:0;71d8d2d6408d:39103 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:03,954 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T03:47:03,954 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0001faa9bb9158a1735ce4f5ec2ad86a, disabling compactions & flushes 2024-11-24T03:47:03,954 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T03:47:03,954 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T03:47:03,954 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:47:03,954 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:47:03,954 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. after waiting 0 ms 2024-11-24T03:47:03,954 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T03:47:03,954 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 
2024-11-24T03:47:03,954 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 0001faa9bb9158a1735ce4f5ec2ad86a 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-24T03:47:03,955 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T03:47:03,955 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 0001faa9bb9158a1735ce4f5ec2ad86a=TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.} 2024-11-24T03:47:03,955 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:47:03,955 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:47:03,955 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:47:03,955 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:47:03,955 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:47:03,955 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1351): Waiting on 0001faa9bb9158a1735ce4f5ec2ad86a, 1588230740 2024-11-24T03:47:03,955 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-24T03:47:03,962 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/3b9cb6ea4d4b4d719ab6846393ef9fb2 is 1080, key is row0029/info:/1732420017873/Put/seqid=0 2024-11-24T03:47:03,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741850_1026 (size=8193) 2024-11-24T03:47:03,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741850_1026 (size=8193) 2024-11-24T03:47:03,970 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/3b9cb6ea4d4b4d719ab6846393ef9fb2 2024-11-24T03:47:03,981 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/.tmp/info/157a195aac5f48a1bad55cc5651bc8bc is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a./info:regioninfo/1732419931747/Put/seqid=0 2024-11-24T03:47:03,985 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/.tmp/info/3b9cb6ea4d4b4d719ab6846393ef9fb2 as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/3b9cb6ea4d4b4d719ab6846393ef9fb2 2024-11-24T03:47:03,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741851_1027 (size=7016) 2024-11-24T03:47:03,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741851_1027 (size=7016) 2024-11-24T03:47:03,993 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/.tmp/info/157a195aac5f48a1bad55cc5651bc8bc 2024-11-24T03:47:03,996 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/3b9cb6ea4d4b4d719ab6846393ef9fb2, entries=3, sequenceid=48, filesize=8.0 K 2024-11-24T03:47:03,998 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 0001faa9bb9158a1735ce4f5ec2ad86a in 43ms, sequenceid=48, compaction requested=true 2024-11-24T03:47:03,999 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/bd60bc6b71ae4c72a2f70e5660495447, hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/f87f09268c0c42df92710c20d5969c93, hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/9aa4e576c92048b596a909c1f74b9725] to archive 2024-11-24T03:47:04,002 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T03:47:04,007 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/bd60bc6b71ae4c72a2f70e5660495447 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/archive/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/bd60bc6b71ae4c72a2f70e5660495447 2024-11-24T03:47:04,010 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/f87f09268c0c42df92710c20d5969c93 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/archive/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/f87f09268c0c42df92710c20d5969c93 2024-11-24T03:47:04,013 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/9aa4e576c92048b596a909c1f74b9725 to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/archive/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/info/9aa4e576c92048b596a909c1f74b9725 2024-11-24T03:47:04,026 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/.tmp/ns/f7f3c3e7635b40afa32f214b1430e95b is 43, key is default/ns:d/1732419931082/Put/seqid=0 2024-11-24T03:47:04,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741852_1028 (size=5153) 2024-11-24T03:47:04,029 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=71d8d2d6408d:43787 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-24T03:47:04,035 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [bd60bc6b71ae4c72a2f70e5660495447=12509, f87f09268c0c42df92710c20d5969c93=12509, 9aa4e576c92048b596a909c1f74b9725=12509] 2024-11-24T03:47:04,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741852_1028 (size=5153) 2024-11-24T03:47:04,035 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/.tmp/ns/f7f3c3e7635b40afa32f214b1430e95b 2024-11-24T03:47:04,043 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/default/TestLogRolling-testSlowSyncLogRolling/0001faa9bb9158a1735ce4f5ec2ad86a/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-24T03:47:04,046 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 2024-11-24T03:47:04,046 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0001faa9bb9158a1735ce4f5ec2ad86a: Waiting for close lock at 1732420023953Running coprocessor pre-close hooks at 1732420023954 (+1 ms)Disabling compacts and flushes for region at 1732420023954Disabling writes for close at 1732420023954Obtaining lock to block concurrent updates at 1732420023955 (+1 ms)Preparing flush snapshotting stores in 0001faa9bb9158a1735ce4f5ec2ad86a at 1732420023955Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732420023955Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. at 1732420023957 (+2 ms)Flushing 0001faa9bb9158a1735ce4f5ec2ad86a/info: creating writer at 1732420023957Flushing 0001faa9bb9158a1735ce4f5ec2ad86a/info: appending metadata at 1732420023961 (+4 ms)Flushing 0001faa9bb9158a1735ce4f5ec2ad86a/info: closing flushed file at 1732420023961Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54df96f3: reopening flushed file at 1732420023983 (+22 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 0001faa9bb9158a1735ce4f5ec2ad86a in 43ms, sequenceid=48, compaction requested=true at 1732420023998 (+15 ms)Writing region close event to WAL at 1732420024037 (+39 ms)Running coprocessor post-close hooks at 1732420024044 (+7 ms)Closed at 1732420024046 (+2 ms) 2024-11-24T03:47:04,047 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732419931279.0001faa9bb9158a1735ce4f5ec2ad86a. 
2024-11-24T03:47:04,063 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/.tmp/table/90c27d944e4544b8a9f01590e136b99c is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732419931765/Put/seqid=0 2024-11-24T03:47:04,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741853_1029 (size=5396) 2024-11-24T03:47:04,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741853_1029 (size=5396) 2024-11-24T03:47:04,155 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T03:47:04,356 DEBUG [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T03:47:04,474 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/.tmp/table/90c27d944e4544b8a9f01590e136b99c 2024-11-24T03:47:04,487 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/.tmp/info/157a195aac5f48a1bad55cc5651bc8bc as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/info/157a195aac5f48a1bad55cc5651bc8bc 2024-11-24T03:47:04,497 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/info/157a195aac5f48a1bad55cc5651bc8bc, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T03:47:04,499 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/.tmp/ns/f7f3c3e7635b40afa32f214b1430e95b as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/ns/f7f3c3e7635b40afa32f214b1430e95b 2024-11-24T03:47:04,508 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/ns/f7f3c3e7635b40afa32f214b1430e95b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T03:47:04,510 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/.tmp/table/90c27d944e4544b8a9f01590e136b99c as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/table/90c27d944e4544b8a9f01590e136b99c 2024-11-24T03:47:04,520 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/table/90c27d944e4544b8a9f01590e136b99c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T03:47:04,522 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 567ms, sequenceid=11, compaction requested=false 2024-11-24T03:47:04,528 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T03:47:04,530 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:47:04,530 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:47:04,530 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420023955Running coprocessor pre-close hooks at 1732420023955Disabling compacts and flushes for region at 1732420023955Disabling writes for close at 1732420023955Obtaining lock to block concurrent updates at 1732420023955Preparing flush snapshotting stores in 1588230740 at 1732420023955Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732420023956 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732420023957 (+1 ms)Flushing 1588230740/info: creating writer at 1732420023957Flushing 1588230740/info: appending metadata at 1732420023980 (+23 ms)Flushing 1588230740/info: closing flushed file at 1732420023980Flushing 1588230740/ns: creating writer at 1732420024003 (+23 ms)Flushing 1588230740/ns: appending metadata at 1732420024026 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1732420024026Flushing 1588230740/table: creating writer at 1732420024046 (+20 ms)Flushing 1588230740/table: appending metadata at 1732420024062 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732420024062Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@495056c0: reopening flushed file at 1732420024485 (+423 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c9e4b0: reopening flushed file at 1732420024497 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@152756d5: reopening flushed file at 1732420024508 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 567ms, sequenceid=11, compaction requested=false at 1732420024522 (+14 ms)Writing region close event to WAL at 1732420024523 (+1 ms)Running coprocessor post-close hooks at 1732420024529 (+6 ms)Closed at 1732420024530 (+1 ms) 2024-11-24T03:47:04,530 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T03:47:04,556 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(976): stopping server 
71d8d2d6408d,39103,1732419927868; all regions closed. 2024-11-24T03:47:04,558 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,558 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,558 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,558 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,558 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741834_1010 (size=3066) 2024-11-24T03:47:04,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741834_1010 (size=3066) 2024-11-24T03:47:04,580 DEBUG [RS:0;71d8d2d6408d:39103 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs 2024-11-24T03:47:04,580 INFO [RS:0;71d8d2d6408d:39103 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C39103%2C1732419927868.meta:.meta(num 1732419930887) 2024-11-24T03:47:04,582 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,582 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,582 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,582 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,582 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:04,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741847_1023 (size=12695) 2024-11-24T03:47:04,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741847_1023 (size=12695) 2024-11-24T03:47:04,590 DEBUG [RS:0;71d8d2d6408d:39103 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/oldWALs 2024-11-24T03:47:04,590 INFO [RS:0;71d8d2d6408d:39103 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C39103%2C1732419927868:(num 1732420003832) 2024-11-24T03:47:04,590 DEBUG [RS:0;71d8d2d6408d:39103 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:04,590 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:47:04,590 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:47:04,590 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.ChoreService(370): Chore service for: regionserver/71d8d2d6408d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T03:47:04,591 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:47:04,591 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T03:47:04,591 INFO [RS:0;71d8d2d6408d:39103 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39103 2024-11-24T03:47:04,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/71d8d2d6408d,39103,1732419927868 2024-11-24T03:47:04,640 INFO [RS:0;71d8d2d6408d:39103 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:47:04,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:47:04,648 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [71d8d2d6408d,39103,1732419927868] 2024-11-24T03:47:04,656 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/71d8d2d6408d,39103,1732419927868 already deleted, retry=false 2024-11-24T03:47:04,657 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 71d8d2d6408d,39103,1732419927868 expired; onlineServers=0 2024-11-24T03:47:04,657 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '71d8d2d6408d,43787,1732419926986' ***** 2024-11-24T03:47:04,657 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T03:47:04,657 INFO [M:0;71d8d2d6408d:43787 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:47:04,657 INFO [M:0;71d8d2d6408d:43787 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:47:04,657 DEBUG [M:0;71d8d2d6408d:43787 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T03:47:04,657 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T03:47:04,657 DEBUG [M:0;71d8d2d6408d:43787 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T03:47:04,657 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732419929544 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732419929544,5,FailOnTimeoutGroup] 2024-11-24T03:47:04,657 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732419929547 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732419929547,5,FailOnTimeoutGroup] 2024-11-24T03:47:04,657 INFO [M:0;71d8d2d6408d:43787 {}] hbase.ChoreService(370): Chore service for: master/71d8d2d6408d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T03:47:04,658 INFO [M:0;71d8d2d6408d:43787 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:47:04,658 DEBUG [M:0;71d8d2d6408d:43787 {}] master.HMaster(1795): Stopping service threads 2024-11-24T03:47:04,658 INFO [M:0;71d8d2d6408d:43787 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T03:47:04,658 INFO [M:0;71d8d2d6408d:43787 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:47:04,658 INFO [M:0;71d8d2d6408d:43787 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T03:47:04,659 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T03:47:04,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T03:47:04,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:04,665 DEBUG [M:0;71d8d2d6408d:43787 {}] zookeeper.ZKUtil(347): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T03:47:04,665 WARN [M:0;71d8d2d6408d:43787 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T03:47:04,666 INFO [M:0;71d8d2d6408d:43787 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/.lastflushedseqids 2024-11-24T03:47:04,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741854_1030 (size=130) 2024-11-24T03:47:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741854_1030 (size=130) 2024-11-24T03:47:04,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:04,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39103-0x1016c3bcae90001, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:04,749 
INFO [RS:0;71d8d2d6408d:39103 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:47:04,749 INFO [RS:0;71d8d2d6408d:39103 {}] regionserver.HRegionServer(1031): Exiting; stopping=71d8d2d6408d,39103,1732419927868; zookeeper connection closed. 2024-11-24T03:47:04,750 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@721f8e08 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@721f8e08 2024-11-24T03:47:04,750 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T03:47:05,082 INFO [M:0;71d8d2d6408d:43787 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T03:47:05,083 INFO [M:0;71d8d2d6408d:43787 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T03:47:05,083 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:47:05,083 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:05,083 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:05,083 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:47:05,083 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T03:47:05,084 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-24T03:47:05,104 DEBUG [M:0;71d8d2d6408d:43787 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5b259ddb7e94a6e95a5a30e1dc434b0 is 82, key is hbase:meta,,1/info:regioninfo/1732419930954/Put/seqid=0 2024-11-24T03:47:05,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741855_1031 (size=5672) 2024-11-24T03:47:05,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741855_1031 (size=5672) 2024-11-24T03:47:05,121 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5b259ddb7e94a6e95a5a30e1dc434b0 2024-11-24T03:47:05,145 DEBUG [M:0;71d8d2d6408d:43787 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb08ebeb8301415e9fcd0961d7d089b2 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732419931773/Put/seqid=0 2024-11-24T03:47:05,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741856_1032 (size=6248) 2024-11-24T03:47:05,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741856_1032 (size=6248) 2024-11-24T03:47:05,152 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb08ebeb8301415e9fcd0961d7d089b2 2024-11-24T03:47:05,160 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fb08ebeb8301415e9fcd0961d7d089b2 2024-11-24T03:47:05,181 DEBUG [M:0;71d8d2d6408d:43787 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d8a8e515c9b84cc1b945c26cb41e160b is 69, key is 71d8d2d6408d,39103,1732419927868/rs:state/1732419929561/Put/seqid=0 2024-11-24T03:47:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741857_1033 (size=5156) 2024-11-24T03:47:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741857_1033 (size=5156) 2024-11-24T03:47:05,592 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d8a8e515c9b84cc1b945c26cb41e160b 2024-11-24T03:47:05,619 DEBUG [M:0;71d8d2d6408d:43787 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4284a3e5dacf434c9fa5ea32586b4496 is 52, key is load_balancer_on/state:d/1732419931260/Put/seqid=0 2024-11-24T03:47:05,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741858_1034 (size=5056) 2024-11-24T03:47:05,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741858_1034 (size=5056) 2024-11-24T03:47:05,625 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4284a3e5dacf434c9fa5ea32586b4496 2024-11-24T03:47:05,633 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5b259ddb7e94a6e95a5a30e1dc434b0 as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b5b259ddb7e94a6e95a5a30e1dc434b0 2024-11-24T03:47:05,642 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b5b259ddb7e94a6e95a5a30e1dc434b0, entries=8, sequenceid=59, filesize=5.5 K 2024-11-24T03:47:05,644 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb08ebeb8301415e9fcd0961d7d089b2 as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fb08ebeb8301415e9fcd0961d7d089b2 2024-11-24T03:47:05,653 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fb08ebeb8301415e9fcd0961d7d089b2 2024-11-24T03:47:05,653 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fb08ebeb8301415e9fcd0961d7d089b2, entries=6, sequenceid=59, filesize=6.1 K 2024-11-24T03:47:05,654 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d8a8e515c9b84cc1b945c26cb41e160b as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d8a8e515c9b84cc1b945c26cb41e160b 
2024-11-24T03:47:05,666 INFO [regionserver/71d8d2d6408d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:47:05,671 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d8a8e515c9b84cc1b945c26cb41e160b, entries=1, sequenceid=59, filesize=5.0 K 2024-11-24T03:47:05,673 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4284a3e5dacf434c9fa5ea32586b4496 as hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4284a3e5dacf434c9fa5ea32586b4496 2024-11-24T03:47:05,681 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4284a3e5dacf434c9fa5ea32586b4496, entries=1, sequenceid=59, filesize=4.9 K 2024-11-24T03:47:05,682 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 599ms, sequenceid=59, compaction requested=false 2024-11-24T03:47:05,690 INFO [M:0;71d8d2d6408d:43787 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:05,690 DEBUG [M:0;71d8d2d6408d:43787 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420025083Disabling compacts and flushes for region at 1732420025083Disabling writes for close at 1732420025083Obtaining lock to block concurrent updates at 1732420025084 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732420025084Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1732420025084Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732420025085 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732420025085Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732420025103 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732420025103Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732420025128 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732420025144 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732420025144Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732420025160 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732420025180 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732420025180Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732420025602 (+422 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732420025618 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732420025618Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@385d7ba7: reopening flushed file at 1732420025632 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52567e5a: reopening flushed file at 1732420025642 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fae35fc: reopening flushed file at 1732420025653 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23fe0e9e: reopening flushed file at 1732420025672 (+19 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 599ms, sequenceid=59, compaction requested=false at 1732420025682 (+10 ms)Writing region close event to WAL at 1732420025690 (+8 ms)Closed at 1732420025690 2024-11-24T03:47:05,691 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:05,691 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:05,691 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:05,692 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:05,692 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:05,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741830_1006 (size=27985) 2024-11-24T03:47:05,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38065 is added to blk_1073741830_1006 (size=27985) 2024-11-24T03:47:05,695 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T03:47:05,695 INFO [M:0;71d8d2d6408d:43787 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T03:47:05,696 INFO [M:0;71d8d2d6408d:43787 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43787 2024-11-24T03:47:05,696 INFO [M:0;71d8d2d6408d:43787 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:47:05,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:05,848 INFO [M:0;71d8d2d6408d:43787 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:47:05,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43787-0x1016c3bcae90000, quorum=127.0.0.1:50335, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:05,877 WARN [BP-169950544-172.17.0.2-1732419922671 heartbeating to localhost/127.0.0.1:38669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-169950544-172.17.0.2-1732419922671 (Datanode Uuid aa7021d6-c27d-4877-94f9-b6e1408026f1) service to localhost/127.0.0.1:38669 2024-11-24T03:47:05,894 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/data/data3/current/BP-169950544-172.17.0.2-1732419922671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:05,894 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/data/data4/current/BP-169950544-172.17.0.2-1732419922671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:05,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c963ecd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:05,898 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:05,898 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:05,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:05,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:05,903 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:05,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f93babe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:05,906 INFO [Time-limited test 
{}] server.AbstractConnector(383): Stopped ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:05,906 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:05,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:05,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:05,908 WARN [BP-169950544-172.17.0.2-1732419922671 heartbeating to localhost/127.0.0.1:38669 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:47:05,908 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T03:47:05,908 WARN [BP-169950544-172.17.0.2-1732419922671 heartbeating to localhost/127.0.0.1:38669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-169950544-172.17.0.2-1732419922671 (Datanode Uuid 14914e37-994b-4bdc-a710-4f8bfbc895cd) service to localhost/127.0.0.1:38669 2024-11-24T03:47:05,908 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:47:05,909 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/data/data1/current/BP-169950544-172.17.0.2-1732419922671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:05,909 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/cluster_777397b2-2966-7725-1698-e3d55723f117/data/data2/current/BP-169950544-172.17.0.2-1732419922671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:05,910 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:05,922 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de997b9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:47:05,923 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:05,923 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:05,923 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:05,923 INFO [Time-limited test {}] 
handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:05,934 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T03:47:05,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T03:47:05,983 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=81 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38669 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/71d8d2d6408d:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38669 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@7fef2b31 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38669 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:38669 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/71d8d2d6408d:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/71d8d2d6408d:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38669 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native 
Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=254 (was 320), ProcessCount=11 (was 11), AvailableMemoryMB=8315 (was 8281) - AvailableMemoryMB LEAK? - 2024-11-24T03:47:05,991 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=82, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=254, ProcessCount=12, AvailableMemoryMB=8312 2024-11-24T03:47:05,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T03:47:05,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.log.dir so I do NOT create it in target/test-data/76068750-1769-ab70-2dac-720cef5933a3 2024-11-24T03:47:05,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0f4301e9-c43d-4027-0e46-3f38d0c882d3/hadoop.tmp.dir so I do NOT create it in target/test-data/76068750-1769-ab70-2dac-720cef5933a3 2024-11-24T03:47:05,992 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79, deleteOnExit=true 2024-11-24T03:47:05,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T03:47:05,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/test.cache.data in system properties and HBase conf 2024-11-24T03:47:05,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T03:47:05,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.log.dir in system properties and HBase conf 2024-11-24T03:47:05,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T03:47:05,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T03:47:05,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T03:47:05,993 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T03:47:05,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:47:05,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:47:05,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T03:47:05,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:47:05,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T03:47:05,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T03:47:05,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:47:05,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:47:05,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T03:47:05,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/nfs.dump.dir in system properties and HBase conf 2024-11-24T03:47:05,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/java.io.tmpdir in system properties and HBase conf 2024-11-24T03:47:05,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:47:05,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T03:47:05,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T03:47:06,011 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:47:06,310 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:06,320 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:06,322 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:06,322 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:06,322 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:47:06,323 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:06,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5262ced6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:06,324 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ad22575{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:06,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c07c6a6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/java.io.tmpdir/jetty-localhost-34355-hadoop-hdfs-3_4_1-tests_jar-_-any-13335294112717039873/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:47:06,436 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@19c9d0ef{HTTP/1.1, (http/1.1)}{localhost:34355} 2024-11-24T03:47:06,436 INFO [Time-limited test {}] server.Server(415): Started @106171ms 2024-11-24T03:47:06,451 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:47:06,673 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:06,677 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:06,678 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:06,679 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:06,679 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:47:06,679 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a560f35{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:06,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18187eea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:06,787 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c73701b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/java.io.tmpdir/jetty-localhost-45751-hadoop-hdfs-3_4_1-tests_jar-_-any-12904346127098845585/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:06,788 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f3c4e53{HTTP/1.1, (http/1.1)}{localhost:45751} 2024-11-24T03:47:06,788 INFO [Time-limited test {}] server.Server(415): Started @106523ms 2024-11-24T03:47:06,790 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:47:06,825 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:06,829 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:06,830 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:06,830 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:06,830 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:47:06,831 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b1e120d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:06,831 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78e6e8e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:06,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@733d9de8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/java.io.tmpdir/jetty-localhost-42567-hadoop-hdfs-3_4_1-tests_jar-_-any-3009699506057467934/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:06,964 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50acdccf{HTTP/1.1, (http/1.1)}{localhost:42567} 2024-11-24T03:47:06,964 INFO [Time-limited test {}] server.Server(415): Started @106700ms 2024-11-24T03:47:06,966 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
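By this point the log has shown HBaseTestingUtil redirecting the Hadoop/HDFS directory properties into a per-test scratch directory and bringing up the mini-DFS web contexts (the hdfs app plus two datanode Jetty servers). For orientation, here is a minimal sketch of the mini-cluster lifecycle that produces output like this, assuming the HBaseTestingUtil class named in the log and its long-standing startMiniCluster/shutdownMiniCluster/createTable conventions; method names can differ between branches, and the table name and column family below are purely illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // The utility owns a scratch directory (like the test-data/76068750-... path above)
    // and repoints dfs.*, nfs.dump.dir, java.io.tmpdir, etc. into it before start-up.
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration(); // tests typically tweak conf here first

    // Starts a mini DFS (NameNode + DataNodes with their Jetty UIs), a
    // MiniZooKeeperCluster, then an HMaster and one HRegionServer.
    util.startMiniCluster();
    try {
      Table table = util.createTable(TableName.valueOf("sketch"), Bytes.toBytes("cf"));
      // ... exercise the cluster ...
    } finally {
      util.shutdownMiniCluster(); // tears down HBase, ZooKeeper and DFS in reverse order
    }
  }
}
```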
2024-11-24T03:47:07,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:47:07,407 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T03:47:07,408 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T03:47:07,408 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T03:47:07,566 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/data/data1/current/BP-80565985-172.17.0.2-1732420026024/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:07,566 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/data/data2/current/BP-80565985-172.17.0.2-1732420026024/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:07,591 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:47:07,594 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7edcd148f5165290 with lease ID 0xa8e6016af9cfde3a: Processing first storage report for DS-b7efa46e-f0c7-4b73-b736-e5a265e2ee4a from datanode DatanodeRegistration(127.0.0.1:32979, datanodeUuid=fa9fbeda-34b0-4cf8-a7de-d57e7592d795, infoPort=46607, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=1677395049;c=1732420026024) 2024-11-24T03:47:07,594 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7edcd148f5165290 with lease ID 0xa8e6016af9cfde3a: from storage DS-b7efa46e-f0c7-4b73-b736-e5a265e2ee4a node DatanodeRegistration(127.0.0.1:32979, datanodeUuid=fa9fbeda-34b0-4cf8-a7de-d57e7592d795, infoPort=46607, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=1677395049;c=1732420026024), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:07,594 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7edcd148f5165290 with lease ID 0xa8e6016af9cfde3a: Processing first storage report for DS-8a5aaac8-9c8d-49e4-beee-ccb9b5228c84 from datanode DatanodeRegistration(127.0.0.1:32979, datanodeUuid=fa9fbeda-34b0-4cf8-a7de-d57e7592d795, infoPort=46607, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=1677395049;c=1732420026024) 2024-11-24T03:47:07,594 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7edcd148f5165290 with lease ID 0xa8e6016af9cfde3a: from storage DS-8a5aaac8-9c8d-49e4-beee-ccb9b5228c84 node DatanodeRegistration(127.0.0.1:32979, datanodeUuid=fa9fbeda-34b0-4cf8-a7de-d57e7592d795, infoPort=46607, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=1677395049;c=1732420026024), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:07,744 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/data/data4/current/BP-80565985-172.17.0.2-1732420026024/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:07,744 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/data/data3/current/BP-80565985-172.17.0.2-1732420026024/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:07,761 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:47:07,764 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94d3d2b432f38785 with lease ID 0xa8e6016af9cfde3b: Processing first storage report for DS-a0b1b76a-50a4-4e98-bf0c-be6efe20f091 from datanode DatanodeRegistration(127.0.0.1:35461, datanodeUuid=b0389787-fbf9-421b-bbe3-44555ac5d8f2, infoPort=38533, infoSecurePort=0, ipcPort=46501, storageInfo=lv=-57;cid=testClusterID;nsid=1677395049;c=1732420026024) 2024-11-24T03:47:07,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94d3d2b432f38785 with lease ID 0xa8e6016af9cfde3b: from storage DS-a0b1b76a-50a4-4e98-bf0c-be6efe20f091 node DatanodeRegistration(127.0.0.1:35461, datanodeUuid=b0389787-fbf9-421b-bbe3-44555ac5d8f2, infoPort=38533, infoSecurePort=0, ipcPort=46501, storageInfo=lv=-57;cid=testClusterID;nsid=1677395049;c=1732420026024), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T03:47:07,765 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94d3d2b432f38785 with lease ID 0xa8e6016af9cfde3b: Processing first storage report for DS-79c54e3c-4307-41e7-ae94-052729404be7 from datanode DatanodeRegistration(127.0.0.1:35461, datanodeUuid=b0389787-fbf9-421b-bbe3-44555ac5d8f2, infoPort=38533, infoSecurePort=0, ipcPort=46501, storageInfo=lv=-57;cid=testClusterID;nsid=1677395049;c=1732420026024) 2024-11-24T03:47:07,765 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94d3d2b432f38785 with lease ID 0xa8e6016af9cfde3b: from storage DS-79c54e3c-4307-41e7-ae94-052729404be7 node DatanodeRegistration(127.0.0.1:35461, datanodeUuid=b0389787-fbf9-421b-bbe3-44555ac5d8f2, infoPort=38533, infoSecurePort=0, ipcPort=46501, storageInfo=lv=-57;cid=testClusterID;nsid=1677395049;c=1732420026024), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:07,808 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3 2024-11-24T03:47:07,815 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/zookeeper_0, clientPort=50464, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T03:47:07,816 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50464 2024-11-24T03:47:07,816 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:07,818 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:07,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:47:07,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:47:07,832 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f with version=8 2024-11-24T03:47:07,832 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/hbase-staging 2024-11-24T03:47:07,835 INFO [Time-limited test {}] client.ConnectionUtils(128): master/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:47:07,835 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:07,835 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:07,835 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:47:07,835 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:07,835 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:47:07,835 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T03:47:07,836 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:47:07,836 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41491 2024-11-24T03:47:07,838 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41491 connecting to ZooKeeper ensemble=127.0.0.1:50464 2024-11-24T03:47:07,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:414910x0, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:47:07,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41491-0x1016c3d58690000 connected 2024-11-24T03:47:07,960 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:07,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:07,966 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:07,967 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f, hbase.cluster.distributed=false 2024-11-24T03:47:07,969 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:47:07,969 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41491 2024-11-24T03:47:07,969 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41491 2024-11-24T03:47:07,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41491 2024-11-24T03:47:07,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41491 2024-11-24T03:47:07,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41491 2024-11-24T03:47:07,994 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:47:07,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:07,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:07,994 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:47:07,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:07,995 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:47:07,995 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T03:47:07,995 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:47:07,996 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44465 2024-11-24T03:47:07,997 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44465 connecting to ZooKeeper ensemble=127.0.0.1:50464 2024-11-24T03:47:07,998 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:08,001 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:08,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:444650x0, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:47:08,018 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:444650x0, quorum=127.0.0.1:50464, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:08,018 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44465-0x1016c3d58690001 connected 2024-11-24T03:47:08,019 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T03:47:08,020 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T03:47:08,021 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T03:47:08,023 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:47:08,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44465 2024-11-24T03:47:08,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44465 2024-11-24T03:47:08,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44465 2024-11-24T03:47:08,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44465 2024-11-24T03:47:08,030 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44465 2024-11-24T03:47:08,044 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;71d8d2d6408d:41491 2024-11-24T03:47:08,047 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/71d8d2d6408d,41491,1732420027835 2024-11-24T03:47:08,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:08,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:08,052 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/71d8d2d6408d,41491,1732420027835 2024-11-24T03:47:08,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T03:47:08,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,060 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T03:47:08,062 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/71d8d2d6408d,41491,1732420027835 from backup master directory 2024-11-24T03:47:08,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/71d8d2d6408d,41491,1732420027835 2024-11-24T03:47:08,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:08,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:08,068 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
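The ZKWatcher lines above trace master election over ZooKeeper: the process registers under /hbase/backup-masters, then deletes that znode and creates /hbase/master, and every client holding a watch sees the corresponding NodeCreated / NodeDeleted / NodeChildrenChanged events. A rough illustration of observing those same events with the plain ZooKeeper client follows; it assumes the quorum address 127.0.0.1:50464 from this particular run and the /hbase base znode, and it simplifies watch re-arming and error handling:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connection string taken from this run's MiniZooKeeperCluster; any quorum works.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50464", 30_000, event -> { });

    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Mirrors the ZKWatcher lines: NodeCreated / NodeDeleted / NodeChildrenChanged.
        System.out.println("event " + event.getType() + " on " + event.getPath());
        try {
          // ZooKeeper watches are one-shot, so re-arm them after every notification.
          zk.exists("/hbase/master", this);
          zk.getChildren("/hbase/backup-masters", this);
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      }
    };

    zk.exists("/hbase/master", watcher);              // fires NodeCreated once a master wins
    zk.getChildren("/hbase/backup-masters", watcher); // fires NodeChildrenChanged on register/delete
    Thread.sleep(Long.MAX_VALUE);
  }
}
```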
2024-11-24T03:47:08,068 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=71d8d2d6408d,41491,1732420027835 2024-11-24T03:47:08,074 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/hbase.id] with ID: f6a82c1b-000a-4a5f-92e6-f0b9dc53ef81 2024-11-24T03:47:08,074 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/.tmp/hbase.id 2024-11-24T03:47:08,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:47:08,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:47:08,086 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/.tmp/hbase.id]:[hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/hbase.id] 2024-11-24T03:47:08,102 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:08,102 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T03:47:08,105 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
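The FSUtils lines just above show the cluster ID being written to a temporary .tmp/hbase.id file and then moved onto its final hbase.id path, so a reader never observes a half-written ID file. Below is a minimal sketch of that write-then-rename publish pattern using the stock Hadoop FileSystem API; the PublishFileSketch/publish names, the example path, and the placeholder content are made up for illustration, only the pattern matches the log:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishFileSketch {
  /** Write content to <dir>/.tmp/<name>, then rename it onto <dir>/<name>. */
  static void publish(FileSystem fs, Path dir, String name, String content) throws IOException {
    Path tmp = new Path(new Path(dir, ".tmp"), name);
    Path dst = new Path(dir, name);
    try (FSDataOutputStream out = fs.create(tmp, true)) { // overwrite any stale temp file
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) { // on HDFS this is a metadata-only move
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // picks up fs.defaultFS from the classpath config
    try (FileSystem fs = FileSystem.get(conf)) {
      publish(fs, new Path("/user/jenkins/example"), "hbase.id", "example-cluster-id");
    }
  }
}
```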
2024-11-24T03:47:08,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:47:08,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:47:08,120 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:47:08,121 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T03:47:08,122 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:47:08,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:47:08,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:47:08,132 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store 2024-11-24T03:47:08,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:47:08,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:47:08,141 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:08,141 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:47:08,141 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:08,141 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:08,141 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:47:08,141 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:08,141 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
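The descriptor dump above is the declarative definition of the master:store local region: families info, proc, rs and state, with info using ROW_INDEX_V1 encoding, a ROWCOL bloom filter, in-memory caching and 8 KB blocks. The sketch below builds a comparable descriptor with the public HBase client builder API; it reproduces only the attributes visible in the log (defaults elsewhere) and is not the code HBase itself uses to create master:store:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // 'info' mirrors the logged settings: 3 versions, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, in-memory, 8 KB block size.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info);

    // The remaining families keep the defaults shown in the log: 1 version, ROW bloom.
    for (String family : new String[] {"proc", "rs", "state"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .build());
    }
    return builder.build();
  }
}
```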
2024-11-24T03:47:08,142 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420028141Disabling compacts and flushes for region at 1732420028141Disabling writes for close at 1732420028141Writing region close event to WAL at 1732420028141Closed at 1732420028141 2024-11-24T03:47:08,143 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/.initializing 2024-11-24T03:47:08,143 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/WALs/71d8d2d6408d,41491,1732420027835 2024-11-24T03:47:08,147 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C41491%2C1732420027835, suffix=, logDir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/WALs/71d8d2d6408d,41491,1732420027835, archiveDir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/oldWALs, maxLogs=10 2024-11-24T03:47:08,148 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C41491%2C1732420027835.1732420028148 2024-11-24T03:47:08,156 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/WALs/71d8d2d6408d,41491,1732420027835/71d8d2d6408d%2C41491%2C1732420027835.1732420028148 2024-11-24T03:47:08,156 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38533:38533),(127.0.0.1/127.0.0.1:46607:46607)] 2024-11-24T03:47:08,157 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:47:08,157 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:08,157 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,157 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,159 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,161 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T03:47:08,161 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:08,162 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:08,162 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,164 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T03:47:08,164 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:08,165 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:08,165 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T03:47:08,169 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:08,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:08,170 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T03:47:08,171 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:08,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:08,173 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,174 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,174 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,176 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,176 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,177 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T03:47:08,179 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:08,181 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:47:08,182 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764719, jitterRate=-0.027609825134277344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T03:47:08,183 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732420028158Initializing all the Stores at 1732420028159 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420028159Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420028159Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420028159Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420028159Cleaning up temporary data from old regions at 1732420028176 (+17 ms)Region opened successfully at 1732420028183 (+7 ms) 2024-11-24T03:47:08,183 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T03:47:08,189 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aaed38e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:47:08,190 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T03:47:08,190 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T03:47:08,191 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T03:47:08,191 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T03:47:08,192 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T03:47:08,192 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T03:47:08,192 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T03:47:08,195 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T03:47:08,197 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T03:47:08,209 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T03:47:08,210 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T03:47:08,211 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T03:47:08,218 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T03:47:08,219 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T03:47:08,220 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T03:47:08,226 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T03:47:08,228 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T03:47:08,235 DEBUG 
[master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T03:47:08,239 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T03:47:08,251 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T03:47:08,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:47:08,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:47:08,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,261 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=71d8d2d6408d,41491,1732420027835, sessionid=0x1016c3d58690000, setting cluster-up flag (Was=false) 2024-11-24T03:47:08,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,310 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T03:47:08,313 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,41491,1732420027835 2024-11-24T03:47:08,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,351 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T03:47:08,353 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,41491,1732420027835 2024-11-24T03:47:08,355 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T03:47:08,357 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T03:47:08,357 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T03:47:08,357 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T03:47:08,357 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 71d8d2d6408d,41491,1732420027835 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T03:47:08,359 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:47:08,359 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:47:08,359 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:47:08,359 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:47:08,360 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/71d8d2d6408d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T03:47:08,360 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,360 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:47:08,360 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T03:47:08,361 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732420058360 2024-11-24T03:47:08,361 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T03:47:08,361 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T03:47:08,361 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T03:47:08,361 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T03:47:08,361 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T03:47:08,361 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T03:47:08,361 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,362 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T03:47:08,362 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T03:47:08,362 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:47:08,362 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T03:47:08,362 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T03:47:08,362 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T03:47:08,362 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T03:47:08,362 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420028362,5,FailOnTimeoutGroup] 2024-11-24T03:47:08,363 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420028362,5,FailOnTimeoutGroup] 2024-11-24T03:47:08,363 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,363 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T03:47:08,363 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,363 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,363 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:08,364 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T03:47:08,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:47:08,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:47:08,373 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T03:47:08,373 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f 2024-11-24T03:47:08,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:47:08,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:47:08,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:08,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:47:08,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:47:08,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:08,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:08,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:47:08,396 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:47:08,396 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:08,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:08,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:47:08,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:47:08,399 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:08,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:08,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:47:08,402 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:47:08,402 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:08,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:08,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:47:08,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740 2024-11-24T03:47:08,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740 2024-11-24T03:47:08,406 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:47:08,406 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:47:08,407 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
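Note: the entries above mention two tunables left at their defaults in this run: hbase.regions.recovery.store.file.ref.count (the HMaster reports that reopening regions with very high storeFileRefCount stays disabled unless the threshold is set > 0) and hbase.hregion.percolumnfamilyflush.size.lower.bound (absent from the hbase:meta descriptor, so FlushLargeStoresPolicy falls back to the memstore flush size divided by the number of families). A minimal sketch of setting both on a Configuration before the cluster or table is created; the concrete values are illustrative assumptions, not recommendations.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TuningSketch {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables reopening of regions whose store file
        // reference count exceeds the threshold (per the HMaster message above).
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        // Flush only column families holding at least 16 MB instead of the
        // whole region; 16 MB is an example value only.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
        return conf;
      }
    }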
2024-11-24T03:47:08,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:47:08,411 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:47:08,412 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694863, jitterRate=-0.11643724143505096}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:47:08,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732420028389Initializing all the Stores at 1732420028390 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420028390Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420028390Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420028391 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420028391Cleaning up temporary data from old regions at 1732420028406 (+15 ms)Region opened successfully at 1732420028412 (+6 ms) 2024-11-24T03:47:08,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:47:08,413 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:47:08,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:47:08,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:47:08,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:47:08,413 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:47:08,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420028413Disabling compacts and flushes for region at 1732420028413Disabling writes for close at 1732420028413Writing 
region close event to WAL at 1732420028413Closed at 1732420028413 2024-11-24T03:47:08,415 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:47:08,415 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T03:47:08,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T03:47:08,417 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:47:08,418 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T03:47:08,433 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(746): ClusterId : f6a82c1b-000a-4a5f-92e6-f0b9dc53ef81 2024-11-24T03:47:08,433 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T03:47:08,461 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T03:47:08,461 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T03:47:08,469 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T03:47:08,470 DEBUG [RS:0;71d8d2d6408d:44465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e116948, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:47:08,482 DEBUG [RS:0;71d8d2d6408d:44465 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;71d8d2d6408d:44465 2024-11-24T03:47:08,482 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T03:47:08,482 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T03:47:08,482 DEBUG [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(832): About to register with Master. 
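Note: at this point the lone region server is initializing its procedure members and RPC client and is about to report for duty, while the balancer entry further up still shows the master's view as "Number of live region servers: 0". A hedged client-side sketch of polling that same view through the Admin API; the 30-second deadline and connection setup are assumptions for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForRegionServers {
      // Polls the master until at least `expected` region servers have registered.
      public static void await(int expected) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          long deadline = System.currentTimeMillis() + 30_000; // assumed timeout
          while (admin.getClusterMetrics().getLiveServerMetrics().size() < expected) {
            if (System.currentTimeMillis() > deadline) {
              throw new IllegalStateException("region servers did not report in time");
            }
            Thread.sleep(200);
          }
        }
      }
    }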
2024-11-24T03:47:08,483 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(2659): reportForDuty to master=71d8d2d6408d,41491,1732420027835 with port=44465, startcode=1732420027993 2024-11-24T03:47:08,483 DEBUG [RS:0;71d8d2d6408d:44465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T03:47:08,487 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59403, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T03:47:08,487 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41491 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:08,487 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41491 {}] master.ServerManager(517): Registering regionserver=71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:08,490 DEBUG [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f 2024-11-24T03:47:08,490 DEBUG [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38553 2024-11-24T03:47:08,490 DEBUG [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T03:47:08,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:47:08,502 DEBUG [RS:0;71d8d2d6408d:44465 {}] zookeeper.ZKUtil(111): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:08,502 WARN [RS:0;71d8d2d6408d:44465 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T03:47:08,502 INFO [RS:0;71d8d2d6408d:44465 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:47:08,502 DEBUG [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/WALs/71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:08,511 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [71d8d2d6408d,44465,1732420027993] 2024-11-24T03:47:08,512 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T03:47:08,515 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T03:47:08,516 INFO [RS:0;71d8d2d6408d:44465 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T03:47:08,516 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
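Note: the MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M (0.95 of the limit). A hedged sketch of the configuration keys that govern these figures; the property names below are the ones documented in the HBase configuration reference and should be treated as assumptions to verify against the version under test, and 0.4/0.95 are the usual defaults rather than values taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
      public static Configuration withMemStoreLimits() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores combined.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark, expressed as a fraction of the limit above.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }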
2024-11-24T03:47:08,518 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T03:47:08,520 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T03:47:08,520 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,520 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,520 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,520 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,520 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:47:08,521 DEBUG [RS:0;71d8d2d6408d:44465 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:47:08,522 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T03:47:08,522 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,522 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,522 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,522 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,522 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,44465,1732420027993-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:47:08,537 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T03:47:08,537 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,44465,1732420027993-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,537 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,537 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.Replication(171): 71d8d2d6408d,44465,1732420027993 started 2024-11-24T03:47:08,553 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:08,553 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(1482): Serving as 71d8d2d6408d,44465,1732420027993, RpcServer on 71d8d2d6408d/172.17.0.2:44465, sessionid=0x1016c3d58690001 2024-11-24T03:47:08,554 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T03:47:08,554 DEBUG [RS:0;71d8d2d6408d:44465 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:08,554 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,44465,1732420027993' 2024-11-24T03:47:08,554 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T03:47:08,554 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T03:47:08,555 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T03:47:08,555 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T03:47:08,555 DEBUG [RS:0;71d8d2d6408d:44465 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:08,555 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,44465,1732420027993' 2024-11-24T03:47:08,555 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T03:47:08,556 DEBUG 
[RS:0;71d8d2d6408d:44465 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T03:47:08,556 DEBUG [RS:0;71d8d2d6408d:44465 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T03:47:08,556 INFO [RS:0;71d8d2d6408d:44465 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T03:47:08,556 INFO [RS:0;71d8d2d6408d:44465 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T03:47:08,569 WARN [71d8d2d6408d:41491 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T03:47:08,659 INFO [RS:0;71d8d2d6408d:44465 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C44465%2C1732420027993, suffix=, logDir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/WALs/71d8d2d6408d,44465,1732420027993, archiveDir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/oldWALs, maxLogs=32 2024-11-24T03:47:08,663 INFO [RS:0;71d8d2d6408d:44465 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C44465%2C1732420027993.1732420028662 2024-11-24T03:47:08,672 INFO [RS:0;71d8d2d6408d:44465 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/WALs/71d8d2d6408d,44465,1732420027993/71d8d2d6408d%2C44465%2C1732420027993.1732420028662 2024-11-24T03:47:08,673 DEBUG [RS:0;71d8d2d6408d:44465 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38533:38533),(127.0.0.1/127.0.0.1:46607:46607)] 2024-11-24T03:47:08,819 DEBUG [71d8d2d6408d:41491 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T03:47:08,820 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:08,822 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,44465,1732420027993, state=OPENING 2024-11-24T03:47:08,835 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T03:47:08,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:08,844 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:47:08,844 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:47:08,844 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:47:08,844 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,44465,1732420027993}] 2024-11-24T03:47:08,999 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T03:47:09,002 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59041, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T03:47:09,008 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T03:47:09,009 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:47:09,011 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C44465%2C1732420027993.meta, suffix=.meta, logDir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/WALs/71d8d2d6408d,44465,1732420027993, archiveDir=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/oldWALs, maxLogs=32 2024-11-24T03:47:09,014 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C44465%2C1732420027993.meta.1732420029014.meta 2024-11-24T03:47:09,026 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/WALs/71d8d2d6408d,44465,1732420027993/71d8d2d6408d%2C44465%2C1732420027993.meta.1732420029014.meta 2024-11-24T03:47:09,027 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38533:38533),(127.0.0.1/127.0.0.1:46607:46607)] 2024-11-24T03:47:09,028 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:47:09,028 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T03:47:09,028 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T03:47:09,028 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
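Note: the entries above show the master writing the hbase:meta location into the /hbase/meta-region-server znode and dispatching OpenRegionProcedure to the region server; later entries in this log fetch that location back through the connection registry. A minimal client-side sketch of the same lookup via RegionLocator; the connection setup is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationLookup {
      public static HRegionLocation locateMeta() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves hbase:meta,,1.1588230740 to its hosting region server,
          // the same answer the registry returns in later entries of this log.
          return locator.getRegionLocation(HConstants.EMPTY_START_ROW);
        }
      }
    }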
2024-11-24T03:47:09,028 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T03:47:09,029 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:09,029 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T03:47:09,029 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T03:47:09,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:47:09,032 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:47:09,032 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:09,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:09,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:47:09,034 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:47:09,034 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:09,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:09,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:47:09,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:47:09,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:09,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:09,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:47:09,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:47:09,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:09,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
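Note: the store openings above repeat the hbase:meta column-family settings (VERSIONS 3, ROWCOL bloom filter, ROW_INDEX_V1 block encoding, in-memory, 8 KB blocks). A hedged sketch of declaring a family with the same attributes through the descriptor builders; the table name is hypothetical and the snippet only builds a descriptor, it does not create a table.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
      public static TableDescriptor build() {
        // Mirrors the 'info' family settings printed above for hbase:meta.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        // "example:table" is a hypothetical name, not one used in this test.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example:table"))
            .setColumnFamily(info)
            .build();
      }
    }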
2024-11-24T03:47:09,039 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:47:09,040 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740 2024-11-24T03:47:09,042 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740 2024-11-24T03:47:09,043 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:47:09,043 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:47:09,044 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T03:47:09,045 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:47:09,047 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802372, jitterRate=0.02026970684528351}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:47:09,047 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T03:47:09,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:09,048 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732420029029Writing region info on filesystem at 1732420029029Initializing all the Stores at 1732420029030 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420029030Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420029030Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420029030Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420029030Cleaning up temporary data from old regions at 1732420029043 (+13 ms)Running coprocessor post-open hooks at 1732420029047 (+4 ms)Region opened successfully at 1732420029048 (+1 ms) 2024-11-24T03:47:09,049 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732420028999 2024-11-24T03:47:09,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:09,052 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T03:47:09,052 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T03:47:09,053 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:09,055 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,44465,1732420027993, state=OPEN 2024-11-24T03:47:09,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:47:09,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:47:09,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:47:09,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:47:09,101 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:09,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T03:47:09,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,44465,1732420027993 in 257 msec 2024-11-24T03:47:09,109 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T03:47:09,109 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 690 msec 2024-11-24T03:47:09,110 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:47:09,110 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T03:47:09,112 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:47:09,112 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,44465,1732420027993, seqNum=-1] 2024-11-24T03:47:09,113 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:47:09,114 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35107, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:47:09,123 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 765 msec 2024-11-24T03:47:09,123 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732420029123, completionTime=-1 2024-11-24T03:47:09,123 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T03:47:09,123 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T03:47:09,126 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T03:47:09,126 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732420089126 2024-11-24T03:47:09,126 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732420149126 2024-11-24T03:47:09,126 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T03:47:09,126 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41491,1732420027835-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:09,127 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41491,1732420027835-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:09,127 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41491,1732420027835-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:09,127 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-71d8d2d6408d:41491, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:09,127 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:09,129 DEBUG [master/71d8d2d6408d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T03:47:09,130 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
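Note: InitMetaProcedure finishes above by creating the 'default' and 'hbase' namespaces, after which the master schedules its periodic chores. A small hedged sketch of confirming those namespaces from a client once the cluster is up; the connection setup is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A freshly initialized cluster reports exactly 'default' and 'hbase'.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }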
2024-11-24T03:47:09,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.068sec 2024-11-24T03:47:09,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T03:47:09,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T03:47:09,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T03:47:09,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T03:47:09,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T03:47:09,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41491,1732420027835-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:47:09,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41491,1732420027835-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T03:47:09,141 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T03:47:09,141 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T03:47:09,141 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41491,1732420027835-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T03:47:09,237 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16b23b42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:47:09,237 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 71d8d2d6408d,41491,-1 for getting cluster id 2024-11-24T03:47:09,237 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T03:47:09,240 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f6a82c1b-000a-4a5f-92e6-f0b9dc53ef81' 2024-11-24T03:47:09,240 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T03:47:09,240 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f6a82c1b-000a-4a5f-92e6-f0b9dc53ef81" 2024-11-24T03:47:09,241 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38887dbe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:47:09,241 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [71d8d2d6408d,41491,-1] 2024-11-24T03:47:09,241 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T03:47:09,242 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:09,244 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T03:47:09,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78731c35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:47:09,246 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:47:09,247 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,44465,1732420027993, seqNum=-1] 2024-11-24T03:47:09,247 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:47:09,249 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58012, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:47:09,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=71d8d2d6408d,41491,1732420027835 2024-11-24T03:47:09,252 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:09,256 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T03:47:09,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T03:47:09,256 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T03:47:09,256 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:47:09,256 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:09,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:09,257 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T03:47:09,257 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T03:47:09,257 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=650663950, stopped=false 2024-11-24T03:47:09,257 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=71d8d2d6408d,41491,1732420027835 2024-11-24T03:47:09,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:47:09,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:47:09,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:09,285 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:47:09,285 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T03:47:09,285 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:47:09,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:09,285 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:09,285 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:09,286 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '71d8d2d6408d,44465,1732420027993' ***** 2024-11-24T03:47:09,286 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T03:47:09,287 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:09,287 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T03:47:09,287 INFO [RS:0;71d8d2d6408d:44465 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T03:47:09,287 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T03:47:09,287 INFO [RS:0;71d8d2d6408d:44465 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T03:47:09,287 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(959): stopping server 71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:09,287 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:47:09,288 INFO [RS:0;71d8d2d6408d:44465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;71d8d2d6408d:44465. 
2024-11-24T03:47:09,288 DEBUG [RS:0;71d8d2d6408d:44465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:47:09,288 DEBUG [RS:0;71d8d2d6408d:44465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:09,288 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T03:47:09,288 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T03:47:09,288 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T03:47:09,288 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T03:47:09,290 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T03:47:09,291 DEBUG [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T03:47:09,291 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:47:09,291 DEBUG [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T03:47:09,291 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:47:09,291 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:47:09,291 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:47:09,291 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:47:09,291 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T03:47:09,313 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740/.tmp/ns/2b06a8c61a604978b7aa1847ac4e904f is 43, key is default/ns:d/1732420029115/Put/seqid=0 2024-11-24T03:47:09,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741835_1011 (size=5153) 2024-11-24T03:47:09,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741835_1011 (size=5153) 2024-11-24T03:47:09,325 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740/.tmp/ns/2b06a8c61a604978b7aa1847ac4e904f 2024-11-24T03:47:09,336 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740/.tmp/ns/2b06a8c61a604978b7aa1847ac4e904f as hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740/ns/2b06a8c61a604978b7aa1847ac4e904f 2024-11-24T03:47:09,344 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740/ns/2b06a8c61a604978b7aa1847ac4e904f, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T03:47:09,346 INFO 
[RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 55ms, sequenceid=6, compaction requested=false 2024-11-24T03:47:09,346 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T03:47:09,352 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T03:47:09,353 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:47:09,353 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:47:09,353 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420029291Running coprocessor pre-close hooks at 1732420029291Disabling compacts and flushes for region at 1732420029291Disabling writes for close at 1732420029291Obtaining lock to block concurrent updates at 1732420029291Preparing flush snapshotting stores in 1588230740 at 1732420029291Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732420029292 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732420029293 (+1 ms)Flushing 1588230740/ns: creating writer at 1732420029293Flushing 1588230740/ns: appending metadata at 1732420029312 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732420029312Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6444acb3: reopening flushed file at 1732420029334 (+22 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 55ms, sequenceid=6, compaction requested=false at 1732420029346 (+12 ms)Writing region close event to WAL at 1732420029347 (+1 ms)Running coprocessor post-close hooks at 1732420029353 (+6 ms)Closed at 1732420029353 2024-11-24T03:47:09,353 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T03:47:09,491 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(976): stopping server 71d8d2d6408d,44465,1732420027993; all regions closed. 
2024-11-24T03:47:09,492 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,492 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,492 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,492 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,492 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741834_1010 (size=1152) 2024-11-24T03:47:09,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741834_1010 (size=1152) 2024-11-24T03:47:09,499 DEBUG [RS:0;71d8d2d6408d:44465 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/oldWALs 2024-11-24T03:47:09,499 INFO [RS:0;71d8d2d6408d:44465 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C44465%2C1732420027993.meta:.meta(num 1732420029014) 2024-11-24T03:47:09,500 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,500 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,500 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,500 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,500 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:09,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741833_1009 (size=93) 2024-11-24T03:47:09,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741833_1009 (size=93) 2024-11-24T03:47:09,508 DEBUG [RS:0;71d8d2d6408d:44465 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/oldWALs 2024-11-24T03:47:09,508 INFO [RS:0;71d8d2d6408d:44465 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C44465%2C1732420027993:(num 1732420028662) 2024-11-24T03:47:09,509 DEBUG [RS:0;71d8d2d6408d:44465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:09,509 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:47:09,509 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:47:09,509 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.ChoreService(370): Chore service for: regionserver/71d8d2d6408d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T03:47:09,509 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:47:09,509 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T03:47:09,509 INFO [RS:0;71d8d2d6408d:44465 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44465 2024-11-24T03:47:09,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/71d8d2d6408d,44465,1732420027993 2024-11-24T03:47:09,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:47:09,535 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:47:09,535 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$362/0x00007f6f70902060@49fb538 rejected from java.util.concurrent.ThreadPoolExecutor@419d2bcf[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-24T03:47:09,543 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [71d8d2d6408d,44465,1732420027993] 2024-11-24T03:47:09,575 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T03:47:09,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:09,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:09,601 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/71d8d2d6408d,44465,1732420027993 already deleted, retry=false 2024-11-24T03:47:09,601 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 71d8d2d6408d,44465,1732420027993 expired; onlineServers=0 2024-11-24T03:47:09,601 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '71d8d2d6408d,41491,1732420027835' ***** 2024-11-24T03:47:09,601 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T03:47:09,601 INFO [M:0;71d8d2d6408d:41491 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:47:09,602 INFO [M:0;71d8d2d6408d:41491 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:47:09,602 DEBUG [M:0;71d8d2d6408d:41491 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T03:47:09,602 DEBUG [M:0;71d8d2d6408d:41491 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T03:47:09,602 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420028362 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420028362,5,FailOnTimeoutGroup] 2024-11-24T03:47:09,602 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-24T03:47:09,602 INFO [M:0;71d8d2d6408d:41491 {}] hbase.ChoreService(370): Chore service for: master/71d8d2d6408d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T03:47:09,602 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420028362 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420028362,5,FailOnTimeoutGroup] 2024-11-24T03:47:09,602 INFO [M:0;71d8d2d6408d:41491 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:47:09,602 DEBUG [M:0;71d8d2d6408d:41491 {}] master.HMaster(1795): Stopping service threads 2024-11-24T03:47:09,602 INFO [M:0;71d8d2d6408d:41491 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T03:47:09,602 INFO [M:0;71d8d2d6408d:41491 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:47:09,603 INFO [M:0;71d8d2d6408d:41491 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T03:47:09,603 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-24T03:47:09,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T03:47:09,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:09,618 DEBUG [M:0;71d8d2d6408d:41491 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-24T03:47:09,618 DEBUG [M:0;71d8d2d6408d:41491 {}] master.ActiveMasterManager(353): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-24T03:47:09,619 INFO [M:0;71d8d2d6408d:41491 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/.lastflushedseqids 2024-11-24T03:47:09,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741836_1012 (size=99) 2024-11-24T03:47:09,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741836_1012 (size=99) 2024-11-24T03:47:09,626 INFO [M:0;71d8d2d6408d:41491 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T03:47:09,626 INFO [M:0;71d8d2d6408d:41491 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T03:47:09,627 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:47:09,627 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:09,627 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:09,627 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:47:09,627 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T03:47:09,627 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T03:47:09,643 INFO [RS:0;71d8d2d6408d:44465 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:47:09,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:09,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44465-0x1016c3d58690001, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:09,643 INFO [RS:0;71d8d2d6408d:44465 {}] regionserver.HRegionServer(1031): Exiting; stopping=71d8d2d6408d,44465,1732420027993; zookeeper connection closed. 2024-11-24T03:47:09,644 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@21bc12e9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@21bc12e9 2024-11-24T03:47:09,644 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T03:47:09,646 DEBUG [M:0;71d8d2d6408d:41491 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7244eeadb02845d693939294dfda05ee is 82, key is hbase:meta,,1/info:regioninfo/1732420029053/Put/seqid=0 2024-11-24T03:47:09,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741837_1013 (size=5672) 2024-11-24T03:47:09,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741837_1013 (size=5672) 2024-11-24T03:47:09,653 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7244eeadb02845d693939294dfda05ee 2024-11-24T03:47:09,680 DEBUG [M:0;71d8d2d6408d:41491 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0d9d2676bca34e1887f1441257a0c821 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732420029122/Put/seqid=0 2024-11-24T03:47:09,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741838_1014 (size=5275) 2024-11-24T03:47:09,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741838_1014 (size=5275) 2024-11-24T03:47:10,089 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0d9d2676bca34e1887f1441257a0c821 2024-11-24T03:47:10,115 DEBUG [M:0;71d8d2d6408d:41491 {}] hfile.HFileWriterImpl(814): Len of 
the biggest cell in hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2d309ef8209d4670be1f0d4cf13d271b is 69, key is 71d8d2d6408d,44465,1732420027993/rs:state/1732420028488/Put/seqid=0 2024-11-24T03:47:10,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741839_1015 (size=5156) 2024-11-24T03:47:10,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741839_1015 (size=5156) 2024-11-24T03:47:10,121 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2d309ef8209d4670be1f0d4cf13d271b 2024-11-24T03:47:10,145 DEBUG [M:0;71d8d2d6408d:41491 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2c8740eb38774daeb7417bc3901da16b is 52, key is load_balancer_on/state:d/1732420029254/Put/seqid=0 2024-11-24T03:47:10,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741840_1016 (size=5056) 2024-11-24T03:47:10,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741840_1016 (size=5056) 2024-11-24T03:47:10,152 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2c8740eb38774daeb7417bc3901da16b 2024-11-24T03:47:10,160 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7244eeadb02845d693939294dfda05ee as hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7244eeadb02845d693939294dfda05ee 2024-11-24T03:47:10,168 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7244eeadb02845d693939294dfda05ee, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T03:47:10,170 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0d9d2676bca34e1887f1441257a0c821 as hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0d9d2676bca34e1887f1441257a0c821 2024-11-24T03:47:10,177 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0d9d2676bca34e1887f1441257a0c821, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T03:47:10,179 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2d309ef8209d4670be1f0d4cf13d271b as hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2d309ef8209d4670be1f0d4cf13d271b 2024-11-24T03:47:10,185 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2d309ef8209d4670be1f0d4cf13d271b, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T03:47:10,187 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2c8740eb38774daeb7417bc3901da16b as hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2c8740eb38774daeb7417bc3901da16b 2024-11-24T03:47:10,193 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38553/user/jenkins/test-data/64bce189-5add-e420-57a5-1fe281358a5f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2c8740eb38774daeb7417bc3901da16b, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T03:47:10,195 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 568ms, sequenceid=29, compaction requested=false 2024-11-24T03:47:10,196 INFO [M:0;71d8d2d6408d:41491 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:10,197 DEBUG [M:0;71d8d2d6408d:41491 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420029627Disabling compacts and flushes for region at 1732420029627Disabling writes for close at 1732420029627Obtaining lock to block concurrent updates at 1732420029627Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732420029627Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732420029628 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732420029628Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732420029629 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732420029646 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732420029646Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732420029660 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732420029679 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732420029679Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732420030098 (+419 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732420030114 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732420030114Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732420030128 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732420030145 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732420030145Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75257b29: reopening flushed file at 1732420030159 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63115eb7: reopening flushed file at 1732420030169 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d626735: reopening flushed file at 1732420030177 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46e531ed: reopening flushed file at 1732420030185 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 568ms, sequenceid=29, compaction requested=false at 1732420030195 (+10 ms)Writing region close event to WAL at 1732420030196 (+1 ms)Closed at 1732420030196 2024-11-24T03:47:10,197 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:10,197 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:10,197 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:10,197 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:10,197 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:10,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32979 is added to blk_1073741830_1006 (size=10311) 2024-11-24T03:47:10,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741830_1006 (size=10311) 2024-11-24T03:47:10,523 INFO [regionserver/71d8d2d6408d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:47:10,601 INFO [M:0;71d8d2d6408d:41491 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T03:47:10,601 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T03:47:10,601 INFO [M:0;71d8d2d6408d:41491 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41491 2024-11-24T03:47:10,602 INFO [M:0;71d8d2d6408d:41491 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:47:10,751 INFO [M:0;71d8d2d6408d:41491 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:47:10,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:10,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41491-0x1016c3d58690000, quorum=127.0.0.1:50464, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:10,756 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@733d9de8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:10,756 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50acdccf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:10,757 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:10,757 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78e6e8e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:10,757 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b1e120d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:10,760 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:47:10,760 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:47:10,760 WARN [BP-80565985-172.17.0.2-1732420026024 heartbeating to localhost/127.0.0.1:38553 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:47:10,760 WARN [BP-80565985-172.17.0.2-1732420026024 heartbeating to localhost/127.0.0.1:38553 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-80565985-172.17.0.2-1732420026024 (Datanode Uuid b0389787-fbf9-421b-bbe3-44555ac5d8f2) service to localhost/127.0.0.1:38553 2024-11-24T03:47:10,761 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/data/data3/current/BP-80565985-172.17.0.2-1732420026024 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:10,761 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/data/data4/current/BP-80565985-172.17.0.2-1732420026024 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:10,762 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:10,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c73701b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:10,771 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f3c4e53{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:10,771 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:10,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18187eea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:10,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a560f35{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:10,773 WARN [BP-80565985-172.17.0.2-1732420026024 heartbeating to localhost/127.0.0.1:38553 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:47:10,773 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:47:10,773 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:47:10,773 WARN [BP-80565985-172.17.0.2-1732420026024 heartbeating to localhost/127.0.0.1:38553 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-80565985-172.17.0.2-1732420026024 (Datanode Uuid fa9fbeda-34b0-4cf8-a7de-d57e7592d795) service to localhost/127.0.0.1:38553 2024-11-24T03:47:10,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/data/data1/current/BP-80565985-172.17.0.2-1732420026024 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:10,774 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/cluster_980d5a87-d8b1-3a09-5303-d2b252d64b79/data/data2/current/BP-80565985-172.17.0.2-1732420026024 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:10,774 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:10,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c07c6a6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:47:10,780 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@19c9d0ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:10,780 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:10,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ad22575{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:10,781 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5262ced6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:10,788 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T03:47:10,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T03:47:10,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T03:47:10,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.log.dir so I do NOT create it in target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc 2024-11-24T03:47:10,805 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/76068750-1769-ab70-2dac-720cef5933a3/hadoop.tmp.dir so I do NOT create it in target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc 2024-11-24T03:47:10,805 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe, deleteOnExit=true 2024-11-24T03:47:10,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T03:47:10,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/test.cache.data in system properties and HBase conf 2024-11-24T03:47:10,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T03:47:10,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir in system properties and HBase conf 2024-11-24T03:47:10,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T03:47:10,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T03:47:10,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T03:47:10,806 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T03:47:10,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:47:10,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:47:10,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T03:47:10,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:47:10,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T03:47:10,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T03:47:10,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:47:10,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:47:10,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T03:47:10,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/nfs.dump.dir in system properties and HBase conf 2024-11-24T03:47:10,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/java.io.tmpdir in system properties and HBase conf 2024-11-24T03:47:10,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:47:10,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T03:47:10,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T03:47:10,822 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:47:11,073 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:11,080 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:11,081 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:11,081 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:11,081 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:47:11,082 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:11,082 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57980111{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:11,082 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48202e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:11,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2cd29190{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/java.io.tmpdir/jetty-localhost-39643-hadoop-hdfs-3_4_1-tests_jar-_-any-9282310190375443336/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:47:11,194 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3cc2b5d2{HTTP/1.1, (http/1.1)}{localhost:39643} 2024-11-24T03:47:11,194 INFO [Time-limited test {}] server.Server(415): Started @110929ms 2024-11-24T03:47:11,212 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:47:11,418 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:11,422 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:11,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:11,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:11,427 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:47:11,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@357bf795{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:11,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@149f32b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:11,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1705bda4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/java.io.tmpdir/jetty-localhost-33405-hadoop-hdfs-3_4_1-tests_jar-_-any-7270402496853635689/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:11,535 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2cbd3715{HTTP/1.1, (http/1.1)}{localhost:33405} 2024-11-24T03:47:11,535 INFO [Time-limited test {}] server.Server(415): Started @111271ms 2024-11-24T03:47:11,537 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:47:11,586 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:11,591 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:11,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:11,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:11,592 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:47:11,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f400306{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:11,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@630b1d87{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:11,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@19a6c3ca{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/java.io.tmpdir/jetty-localhost-35841-hadoop-hdfs-3_4_1-tests_jar-_-any-1770194075202407126/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:11,699 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e259212{HTTP/1.1, (http/1.1)}{localhost:35841} 2024-11-24T03:47:11,699 INFO [Time-limited test {}] server.Server(415): Started @111434ms 2024-11-24T03:47:11,701 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:47:12,287 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data1/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:12,288 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data2/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:12,311 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:47:12,314 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x86c38e6b6c7ecb85 with lease ID 0xdc6fc54cb56e3cfb: Processing first storage report for DS-0e85fb5f-660e-48cd-847e-21f6529e0b46 from datanode DatanodeRegistration(127.0.0.1:43649, datanodeUuid=6f11d9ea-ea26-4051-9898-33a42eacee43, infoPort=39387, infoSecurePort=0, ipcPort=34843, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:12,314 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x86c38e6b6c7ecb85 with lease ID 0xdc6fc54cb56e3cfb: from storage DS-0e85fb5f-660e-48cd-847e-21f6529e0b46 node DatanodeRegistration(127.0.0.1:43649, datanodeUuid=6f11d9ea-ea26-4051-9898-33a42eacee43, infoPort=39387, infoSecurePort=0, ipcPort=34843, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:12,314 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x86c38e6b6c7ecb85 with lease ID 0xdc6fc54cb56e3cfb: Processing first storage report for DS-97ba4ffb-607b-46f3-bbc6-313431afaf7a from datanode DatanodeRegistration(127.0.0.1:43649, datanodeUuid=6f11d9ea-ea26-4051-9898-33a42eacee43, infoPort=39387, infoSecurePort=0, ipcPort=34843, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:12,314 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x86c38e6b6c7ecb85 with lease ID 0xdc6fc54cb56e3cfb: from storage DS-97ba4ffb-607b-46f3-bbc6-313431afaf7a node DatanodeRegistration(127.0.0.1:43649, datanodeUuid=6f11d9ea-ea26-4051-9898-33a42eacee43, infoPort=39387, infoSecurePort=0, ipcPort=34843, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:12,478 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data4/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:12,478 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data3/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:12,505 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:47:12,508 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5467e6e70d46d15a with lease ID 0xdc6fc54cb56e3cfc: Processing first storage report for DS-49d5c796-666a-4b4b-842e-12c9638ede79 from datanode DatanodeRegistration(127.0.0.1:37123, datanodeUuid=df14d960-b4ba-43c0-a068-d20eb718348c, infoPort=41691, infoSecurePort=0, ipcPort=40589, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:12,508 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5467e6e70d46d15a with lease ID 0xdc6fc54cb56e3cfc: from storage DS-49d5c796-666a-4b4b-842e-12c9638ede79 node DatanodeRegistration(127.0.0.1:37123, datanodeUuid=df14d960-b4ba-43c0-a068-d20eb718348c, infoPort=41691, infoSecurePort=0, ipcPort=40589, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:12,508 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5467e6e70d46d15a with lease ID 0xdc6fc54cb56e3cfc: Processing first storage report for DS-c76f9968-7682-4736-a01d-b023e3bf8f35 from datanode DatanodeRegistration(127.0.0.1:37123, datanodeUuid=df14d960-b4ba-43c0-a068-d20eb718348c, infoPort=41691, infoSecurePort=0, ipcPort=40589, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:12,508 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5467e6e70d46d15a with lease ID 0xdc6fc54cb56e3cfc: from storage DS-c76f9968-7682-4736-a01d-b023e3bf8f35 node DatanodeRegistration(127.0.0.1:37123, datanodeUuid=df14d960-b4ba-43c0-a068-d20eb718348c, infoPort=41691, infoSecurePort=0, ipcPort=40589, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:12,535 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc 2024-11-24T03:47:12,539 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/zookeeper_0, clientPort=51797, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T03:47:12,540 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51797 2024-11-24T03:47:12,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:12,542 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:12,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37123 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:47:12,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43649 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:47:12,553 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a with version=8 2024-11-24T03:47:12,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/hbase-staging 2024-11-24T03:47:12,556 INFO [Time-limited test {}] client.ConnectionUtils(128): master/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:47:12,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:12,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:12,556 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:47:12,557 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:12,557 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:47:12,557 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T03:47:12,557 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:47:12,558 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35963 2024-11-24T03:47:12,560 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35963 connecting to ZooKeeper ensemble=127.0.0.1:51797 2024-11-24T03:47:12,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:359630x0, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:47:12,609 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35963-0x1016c3d6adf0000 connected 2024-11-24T03:47:12,676 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:12,678 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:12,681 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:12,681 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a, hbase.cluster.distributed=false 2024-11-24T03:47:12,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:47:12,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35963 2024-11-24T03:47:12,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35963 2024-11-24T03:47:12,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35963 2024-11-24T03:47:12,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35963 2024-11-24T03:47:12,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35963 2024-11-24T03:47:12,702 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:47:12,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:12,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:12,702 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:47:12,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:12,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:47:12,703 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T03:47:12,703 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:47:12,703 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43259 2024-11-24T03:47:12,705 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43259 connecting to ZooKeeper ensemble=127.0.0.1:51797 2024-11-24T03:47:12,706 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:12,708 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:12,717 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432590x0, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:47:12,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43259-0x1016c3d6adf0001 connected 2024-11-24T03:47:12,718 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:12,718 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T03:47:12,718 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T03:47:12,719 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T03:47:12,720 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:47:12,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43259 2024-11-24T03:47:12,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43259 2024-11-24T03:47:12,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43259 2024-11-24T03:47:12,722 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43259 2024-11-24T03:47:12,724 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43259 2024-11-24T03:47:12,739 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;71d8d2d6408d:35963 2024-11-24T03:47:12,739 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/71d8d2d6408d,35963,1732420032556 2024-11-24T03:47:12,751 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:12,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:12,751 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/71d8d2d6408d,35963,1732420032556 2024-11-24T03:47:12,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:12,759 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T03:47:12,759 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:12,759 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T03:47:12,760 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/71d8d2d6408d,35963,1732420032556 from backup master directory 2024-11-24T03:47:12,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/71d8d2d6408d,35963,1732420032556 2024-11-24T03:47:12,767 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:12,767 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
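[editor's note] The ZooKeeper traffic in the preceding entries (watchers on /hbase/master and /hbase/backup-masters, the backup-master znode being created and then deleted as the master becomes active) can be observed from the outside with a plain ZooKeeper client. A small sketch, assuming the stock org.apache.zookeeper client and reusing the ensemble address 127.0.0.1:51797 and znode paths from the log; HBase itself goes through ZKWatcher/ZKUtil as logged, so this is only an external view:

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZNodeInspectSketch {
      public static void main(String[] args) throws Exception {
        // Ensemble address and base znode taken from the log above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51797", 30000,
            (WatchedEvent e) -> System.out.println("event: " + e));

        // The active master publishes itself at /hbase/master; candidates
        // park under /hbase/backup-masters until one of them takes over.
        Stat active = zk.exists("/hbase/master", false);
        System.out.println("/hbase/master exists: " + (active != null));

        List<String> backups = zk.getChildren("/hbase/backup-masters", false);
        System.out.println("backup masters: " + backups);

        zk.close();
      }
    }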
2024-11-24T03:47:12,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:12,767 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=71d8d2d6408d,35963,1732420032556 2024-11-24T03:47:12,773 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/hbase.id] with ID: 4f316048-d5ac-47bb-ad2e-dce70f905526 2024-11-24T03:47:12,773 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/.tmp/hbase.id 2024-11-24T03:47:12,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43649 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:47:12,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37123 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:47:12,781 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/.tmp/hbase.id]:[hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/hbase.id] 2024-11-24T03:47:12,795 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:12,795 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T03:47:12,797 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
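[editor's note] The cluster ID handling logged above (create hbase.id, write it under .tmp first, then move it to its final location) is the usual HDFS write-then-rename pattern. A sketch of just that pattern with the Hadoop FileSystem API, reusing the NameNode URI, paths, and ID string from the log; the real FSUtils writes a serialized ClusterId rather than raw text, so this illustrates only the rename trick, not the exact file format:

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44445"), new Configuration());
        Path root = new Path("/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a");
        Path tmp = new Path(root, ".tmp/hbase.id");
        Path target = new Path(root, "hbase.id");

        // Write to a temporary file first, then rename into place, so readers
        // never observe a partially written hbase.id.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("4f316048-d5ac-47bb-ad2e-dce70f905526".getBytes(StandardCharsets.UTF_8));
        }
        fs.rename(tmp, target);
      }
    }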
2024-11-24T03:47:12,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:12,809 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:12,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43649 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:47:12,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37123 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:47:12,822 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:47:12,823 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T03:47:12,823 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:47:12,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43649 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:47:12,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37123 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:47:12,833 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store 2024-11-24T03:47:12,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43649 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:47:12,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37123 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:47:12,849 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:12,849 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:47:12,849 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:12,849 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:12,849 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:47:12,850 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:12,850 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
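[editor's note] The master:store descriptor printed above (column families info, proc, rs, and state with their block sizes, versions, and bloom filter types) is built internally by the master, but the same shape can be written down with the public descriptor builders, which makes the printed attributes easier to read. A sketch covering the 'info' and 'proc' families exactly as logged; the remaining 'rs' and 'state' families follow the same pattern as 'proc':

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8192 B
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                    // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)                              // BLOCKSIZE => 65536 B
                .build())
            .build();
        System.out.println(store);
      }
    }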
2024-11-24T03:47:12,850 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420032849Disabling compacts and flushes for region at 1732420032849Disabling writes for close at 1732420032850 (+1 ms)Writing region close event to WAL at 1732420032850Closed at 1732420032850 2024-11-24T03:47:12,850 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/.initializing 2024-11-24T03:47:12,851 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556 2024-11-24T03:47:12,854 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C35963%2C1732420032556, suffix=, logDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556, archiveDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/oldWALs, maxLogs=10 2024-11-24T03:47:12,855 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C35963%2C1732420032556.1732420032854 2024-11-24T03:47:12,865 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420032854 2024-11-24T03:47:12,870 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41691:41691),(127.0.0.1/127.0.0.1:39387:39387)] 2024-11-24T03:47:12,874 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:47:12,874 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:12,875 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,875 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,878 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T03:47:12,878 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:12,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:12,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,881 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T03:47:12,881 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:12,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:12,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T03:47:12,884 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:12,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:12,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T03:47:12,886 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:12,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:12,887 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,888 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,888 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,890 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,890 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,891 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T03:47:12,892 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:12,895 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:47:12,896 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727227, jitterRate=-0.07528339326381683}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T03:47:12,897 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732420032875Initializing all the Stores at 1732420032876 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420032876Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420032876Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420032876Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420032876Cleaning up temporary data from old regions at 1732420032890 (+14 ms)Region opened successfully at 1732420032897 (+7 ms) 2024-11-24T03:47:12,897 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T03:47:12,901 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@460c0c58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:47:12,902 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T03:47:12,903 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T03:47:12,903 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T03:47:12,903 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T03:47:12,903 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T03:47:12,904 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T03:47:12,904 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T03:47:12,907 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T03:47:12,908 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T03:47:12,934 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T03:47:12,934 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T03:47:12,936 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T03:47:12,942 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T03:47:12,943 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T03:47:12,944 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T03:47:12,950 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T03:47:12,952 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T03:47:12,959 DEBUG 
[master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T03:47:12,962 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T03:47:12,967 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T03:47:12,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:47:12,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:12,976 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:47:12,976 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:12,976 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=71d8d2d6408d,35963,1732420032556, sessionid=0x1016c3d6adf0000, setting cluster-up flag (Was=false) 2024-11-24T03:47:12,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:12,992 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:13,017 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T03:47:13,019 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,35963,1732420032556 2024-11-24T03:47:13,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:13,034 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:13,059 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T03:47:13,061 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,35963,1732420032556 2024-11-24T03:47:13,063 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T03:47:13,065 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T03:47:13,066 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T03:47:13,066 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T03:47:13,066 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 71d8d2d6408d,35963,1732420032556 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T03:47:13,069 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:47:13,069 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:47:13,069 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:47:13,069 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:47:13,069 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/71d8d2d6408d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T03:47:13,069 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,069 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:47:13,069 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T03:47:13,071 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732420063071 2024-11-24T03:47:13,071 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T03:47:13,071 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T03:47:13,071 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T03:47:13,071 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T03:47:13,071 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T03:47:13,072 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T03:47:13,072 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,072 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:47:13,072 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T03:47:13,072 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T03:47:13,072 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T03:47:13,072 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T03:47:13,072 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T03:47:13,073 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T03:47:13,073 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420033073,5,FailOnTimeoutGroup] 2024-11-24T03:47:13,073 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420033073,5,FailOnTimeoutGroup] 2024-11-24T03:47:13,073 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,073 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T03:47:13,073 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,073 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,073 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:13,074 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T03:47:13,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43649 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:47:13,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37123 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:47:13,091 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T03:47:13,091 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a 2024-11-24T03:47:13,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37123 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:47:13,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43649 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:47:13,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:13,104 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:47:13,106 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:47:13,106 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:13,107 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:13,107 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:47:13,109 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:47:13,109 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:13,109 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:13,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:47:13,111 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:47:13,111 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:13,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:13,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:47:13,114 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:47:13,114 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:13,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:13,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:47:13,116 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740 2024-11-24T03:47:13,116 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740 2024-11-24T03:47:13,118 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:47:13,118 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:47:13,118 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
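The FSTableDescriptors entries above create 'hbase:meta' with the 'info', 'ns', 'rep_barrier' and 'table' families and spell out each family's attributes (VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE). For orientation only: the same attributes can be expressed through HBase's public descriptor builders. The sketch below is illustrative, the class and table names are invented, and hbase:meta itself is built internally by the master, not through client code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log above:
        // VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL',
        // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8192.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build();

        // 'demo' is a hypothetical table name used only for this sketch.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }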
2024-11-24T03:47:13,119 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:47:13,122 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:47:13,122 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711671, jitterRate=-0.09506475925445557}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:47:13,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732420033099Initializing all the Stores at 1732420033100 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420033100Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420033104 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420033104Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420033104Cleaning up temporary data from old regions at 1732420033118 (+14 ms)Region opened successfully at 1732420033123 (+5 ms) 2024-11-24T03:47:13,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:47:13,123 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:47:13,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:47:13,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:47:13,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:47:13,124 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:47:13,124 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420033123Disabling compacts and flushes for region at 1732420033123Disabling writes for close at 1732420033123Writing 
region close event to WAL at 1732420033124 (+1 ms)Closed at 1732420033124 2024-11-24T03:47:13,125 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:47:13,125 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T03:47:13,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T03:47:13,126 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(746): ClusterId : 4f316048-d5ac-47bb-ad2e-dce70f905526 2024-11-24T03:47:13,126 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T03:47:13,127 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:47:13,129 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T03:47:13,143 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T03:47:13,143 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T03:47:13,151 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T03:47:13,152 DEBUG [RS:0;71d8d2d6408d:43259 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@712d347, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:47:13,167 DEBUG [RS:0;71d8d2d6408d:43259 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;71d8d2d6408d:43259 2024-11-24T03:47:13,167 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T03:47:13,167 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T03:47:13,167 DEBUG [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(832): About to register with Master. 
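The "Opened ..." entries above report the effective split policy chain for these regions: SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy, with a jittered desiredMaxFileSize. As a minimal sketch, assuming the standard configuration keys (verify them against your HBase version), the policy and the size ceiling it jitters are usually chosen like this; the values below are illustrative, not the test-scale ones visible in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SplitPolicySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Cluster-wide default split policy; SteppingSplitPolicy is what the
        // log above reports for these regions.
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        // Size a store may reach before a split is requested; the small,
        // jittered desiredMaxFileSize in the log presumably comes from a
        // test-scale override of this setting.
        conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024);
        System.out.println(conf.get("hbase.regionserver.region.split.policy"));
      }
    }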
2024-11-24T03:47:13,168 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(2659): reportForDuty to master=71d8d2d6408d,35963,1732420032556 with port=43259, startcode=1732420032702 2024-11-24T03:47:13,168 DEBUG [RS:0;71d8d2d6408d:43259 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T03:47:13,170 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56591, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T03:47:13,171 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35963 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:13,171 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35963 {}] master.ServerManager(517): Registering regionserver=71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:13,173 DEBUG [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a 2024-11-24T03:47:13,173 DEBUG [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44445 2024-11-24T03:47:13,173 DEBUG [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T03:47:13,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:47:13,184 DEBUG [RS:0;71d8d2d6408d:43259 {}] zookeeper.ZKUtil(111): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:13,184 WARN [RS:0;71d8d2d6408d:43259 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T03:47:13,184 INFO [RS:0;71d8d2d6408d:43259 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:47:13,184 DEBUG [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:13,185 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [71d8d2d6408d,43259,1732420032702] 2024-11-24T03:47:13,191 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T03:47:13,194 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T03:47:13,195 INFO [RS:0;71d8d2d6408d:43259 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T03:47:13,195 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
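The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, which is 95% of the upper limit. A minimal sketch of the two knobs that normally produce those numbers, assuming the standard key names; the fractions shown are the common defaults, not values read from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap that all memstores may occupy;
        // the 880 M global limit above is presumably this fraction of the
        // test JVM's heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the upper limit; 836 M / 880 M in
        // the log matches the usual 0.95 setting.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        System.out.println(
            conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
      }
    }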
2024-11-24T03:47:13,199 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T03:47:13,200 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T03:47:13,200 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,200 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,200 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,200 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,200 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,200 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,200 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:47:13,200 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,200 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,200 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,201 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,201 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,201 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:13,201 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:47:13,201 DEBUG [RS:0;71d8d2d6408d:43259 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:47:13,202 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
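The executor.ExecutorService entries above start a set of named, bounded thread pools (for example RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1). HBase uses its own ExecutorService wrapper for these, so the snippet below is only a java.util.concurrent analogy of such a bounded pool, not the actual implementation.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolAnalogy {
      public static void main(String[] args) {
        // Rough analog of one of the pools above (RS_OPEN_REGION,
        // corePoolSize=1, maxPoolSize=1); the keep-alive is illustrative.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1,                      // corePoolSize, as logged
            1,                      // maxPoolSize, as logged
            60L, TimeUnit.SECONDS,  // idle keep-alive (illustrative value)
            new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("open-region task"));
        openRegionPool.shutdown();
      }
    }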
2024-11-24T03:47:13,202 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,203 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,203 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,203 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,203 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,43259,1732420032702-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:47:13,223 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T03:47:13,223 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,43259,1732420032702-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,224 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,224 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.Replication(171): 71d8d2d6408d,43259,1732420032702 started 2024-11-24T03:47:13,239 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,239 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(1482): Serving as 71d8d2d6408d,43259,1732420032702, RpcServer on 71d8d2d6408d/172.17.0.2:43259, sessionid=0x1016c3d6adf0001 2024-11-24T03:47:13,239 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T03:47:13,239 DEBUG [RS:0;71d8d2d6408d:43259 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:13,239 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,43259,1732420032702' 2024-11-24T03:47:13,239 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T03:47:13,240 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T03:47:13,241 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T03:47:13,241 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T03:47:13,241 DEBUG [RS:0;71d8d2d6408d:43259 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:13,241 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,43259,1732420032702' 2024-11-24T03:47:13,241 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T03:47:13,241 DEBUG 
[RS:0;71d8d2d6408d:43259 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T03:47:13,241 DEBUG [RS:0;71d8d2d6408d:43259 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T03:47:13,241 INFO [RS:0;71d8d2d6408d:43259 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T03:47:13,241 INFO [RS:0;71d8d2d6408d:43259 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T03:47:13,279 WARN [71d8d2d6408d:35963 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T03:47:13,344 INFO [RS:0;71d8d2d6408d:43259 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C43259%2C1732420032702, suffix=, logDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702, archiveDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs, maxLogs=32 2024-11-24T03:47:13,345 INFO [RS:0;71d8d2d6408d:43259 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C43259%2C1732420032702.1732420033345 2024-11-24T03:47:13,360 INFO [RS:0;71d8d2d6408d:43259 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 2024-11-24T03:47:13,367 DEBUG [RS:0;71d8d2d6408d:43259 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41691:41691),(127.0.0.1/127.0.0.1:39387:39387)] 2024-11-24T03:47:13,529 DEBUG [71d8d2d6408d:35963 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T03:47:13,530 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:13,532 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,43259,1732420032702, state=OPENING 2024-11-24T03:47:13,542 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T03:47:13,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:13,550 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:13,551 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:47:13,551 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:47:13,551 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:47:13,551 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,43259,1732420032702}] 2024-11-24T03:47:13,705 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T03:47:13,706 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41905, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T03:47:13,712 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T03:47:13,712 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:47:13,714 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C43259%2C1732420032702.meta, suffix=.meta, logDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702, archiveDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs, maxLogs=32 2024-11-24T03:47:13,734 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta 2024-11-24T03:47:13,747 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta 2024-11-24T03:47:13,751 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39387:39387),(127.0.0.1/127.0.0.1:41691:41691)] 2024-11-24T03:47:13,753 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:47:13,753 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T03:47:13,753 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T03:47:13,754 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
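The CoprocessorHost entries above show MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor (the coprocessor$1 attribute printed earlier in the descriptor). As a hedged sketch, the same endpoint can be attached to an ordinary table descriptor through the public builder API; the table and family names here are invented for illustration.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
      public static void main(String[] args) throws IOException {
        // 'demo' and 'f' are hypothetical names; hbase:meta gets this
        // endpoint from its system descriptor, as the log above shows.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
            .setCoprocessor(
                "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td);
      }
    }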
2024-11-24T03:47:13,754 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T03:47:13,754 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:13,754 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T03:47:13,754 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T03:47:13,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:47:13,757 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:47:13,757 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:13,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:13,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:47:13,759 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:47:13,759 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:13,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:13,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:47:13,761 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:47:13,761 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:13,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:13,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:47:13,763 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:47:13,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:13,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
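The compactions.CompactionConfiguration entries repeated above print the effective compaction tuning: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with jitter 0.5. A minimal sketch restating those values through what appear to be their configuration keys (key names quoted from memory; confirm against the HBase reference guide):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values restate what CompactionConfiguration logged above.
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter
        System.out.println(conf.getInt("hbase.hstore.compaction.max", 10));
      }
    }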
2024-11-24T03:47:13,764 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:47:13,765 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740 2024-11-24T03:47:13,766 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740 2024-11-24T03:47:13,768 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:47:13,768 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:47:13,769 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T03:47:13,770 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:47:13,771 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733119, jitterRate=-0.06779181957244873}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:47:13,771 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T03:47:13,772 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732420033754Writing region info on filesystem at 1732420033754Initializing all the Stores at 1732420033755 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420033755Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420033756 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420033756Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420033756Cleaning up temporary data from old regions at 1732420033768 (+12 ms)Running coprocessor post-open hooks at 1732420033771 (+3 ms)Region opened successfully at 1732420033772 (+1 ms) 2024-11-24T03:47:13,774 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732420033704 2024-11-24T03:47:13,778 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:13,778 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T03:47:13,778 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T03:47:13,779 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,43259,1732420032702, state=OPEN 2024-11-24T03:47:13,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:47:13,871 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:47:13,871 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:47:13,871 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:47:13,871 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:13,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T03:47:13,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,43259,1732420032702 in 320 msec 2024-11-24T03:47:13,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T03:47:13,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 751 msec 2024-11-24T03:47:13,882 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:47:13,882 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T03:47:13,883 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:47:13,884 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,43259,1732420032702, seqNum=-1] 2024-11-24T03:47:13,884 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:47:13,885 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49755, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:47:13,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 827 msec 2024-11-24T03:47:13,892 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732420033892, completionTime=-1 2024-11-24T03:47:13,892 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T03:47:13,892 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T03:47:13,894 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T03:47:13,895 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732420093894 2024-11-24T03:47:13,895 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732420153895 2024-11-24T03:47:13,895 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T03:47:13,895 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35963,1732420032556-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,895 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35963,1732420032556-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,895 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35963,1732420032556-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,895 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-71d8d2d6408d:35963, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T03:47:13,895 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,896 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:13,897 DEBUG [master/71d8d2d6408d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T03:47:13,900 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.132sec 2024-11-24T03:47:13,900 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T03:47:13,900 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T03:47:13,900 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T03:47:13,900 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T03:47:13,900 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T03:47:13,900 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35963,1732420032556-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:47:13,900 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35963,1732420032556-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T03:47:13,902 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T03:47:13,902 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T03:47:13,902 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35963,1732420032556-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T03:47:13,934 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8bac5bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:47:13,934 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 71d8d2d6408d,35963,-1 for getting cluster id 2024-11-24T03:47:13,934 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T03:47:13,936 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4f316048-d5ac-47bb-ad2e-dce70f905526' 2024-11-24T03:47:13,936 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T03:47:13,937 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4f316048-d5ac-47bb-ad2e-dce70f905526" 2024-11-24T03:47:13,937 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74b6e8b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:47:13,937 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [71d8d2d6408d,35963,-1] 2024-11-24T03:47:13,937 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T03:47:13,938 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:13,940 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33512, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T03:47:13,941 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38837566, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:47:13,941 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:47:13,942 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,43259,1732420032702, seqNum=-1] 2024-11-24T03:47:13,943 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:47:13,945 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:47:13,947 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=71d8d2d6408d,35963,1732420032556 2024-11-24T03:47:13,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:13,950 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T03:47:13,969 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:47:13,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:13,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:13,970 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:47:13,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:13,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:47:13,970 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T03:47:13,970 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:47:13,971 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32933 2024-11-24T03:47:13,972 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32933 connecting to ZooKeeper ensemble=127.0.0.1:51797 2024-11-24T03:47:13,973 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:13,974 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:14,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329330x0, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:47:14,001 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32933-0x1016c3d6adf0002 connected 2024-11-24T03:47:14,001 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-24T03:47:14,001 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-24T03:47:14,002 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T03:47:14,003 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
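At this point the mini cluster is up (activeMaster=71d8d2d6408d,35963) and the test begins instantiating a second region server (RS:1 on port 32933) with small RPC handler counts. A rough sketch of how a test typically does this with the testing utility; the method names are assumed from the HBaseTestingUtil / mini cluster API and the handler-count mapping is an assumption, none of it taken from this log:

    // Sketch (assumed API): start a mini cluster, then add a second region server,
    // roughly matching "Minicluster is up" and the RS:1 startup entries above.
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.getConfiguration().setInt("hbase.regionserver.handler.count", 3); // assumed source of "handlerCount=3"
    util.startMiniCluster(1);                        // one master plus one region server
    util.getMiniHBaseCluster().startRegionServer();  // brings up RS:1, as logged above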
2024-11-24T03:47:14,004 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T03:47:14,007 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:47:14,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32933 2024-11-24T03:47:14,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32933 2024-11-24T03:47:14,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32933 2024-11-24T03:47:14,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32933 2024-11-24T03:47:14,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32933 2024-11-24T03:47:14,011 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(746): ClusterId : 4f316048-d5ac-47bb-ad2e-dce70f905526 2024-11-24T03:47:14,011 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T03:47:14,018 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T03:47:14,018 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T03:47:14,026 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T03:47:14,027 DEBUG [RS:1;71d8d2d6408d:32933 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a95bd4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:47:14,041 DEBUG [RS:1;71d8d2d6408d:32933 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;71d8d2d6408d:32933 2024-11-24T03:47:14,041 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T03:47:14,041 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T03:47:14,042 DEBUG [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T03:47:14,042 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(2659): reportForDuty to master=71d8d2d6408d,35963,1732420032556 with port=32933, startcode=1732420033969 2024-11-24T03:47:14,043 DEBUG [RS:1;71d8d2d6408d:32933 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T03:47:14,044 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60369, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T03:47:14,044 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35963 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 71d8d2d6408d,32933,1732420033969 2024-11-24T03:47:14,044 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35963 {}] master.ServerManager(517): Registering regionserver=71d8d2d6408d,32933,1732420033969 2024-11-24T03:47:14,046 DEBUG [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a 2024-11-24T03:47:14,046 DEBUG [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44445 2024-11-24T03:47:14,046 DEBUG [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T03:47:14,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:47:14,050 DEBUG [RS:1;71d8d2d6408d:32933 {}] zookeeper.ZKUtil(111): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/71d8d2d6408d,32933,1732420033969 2024-11-24T03:47:14,050 WARN [RS:1;71d8d2d6408d:32933 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T03:47:14,051 INFO [RS:1;71d8d2d6408d:32933 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:47:14,051 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [71d8d2d6408d,32933,1732420033969] 2024-11-24T03:47:14,051 DEBUG [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969 2024-11-24T03:47:14,055 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T03:47:14,057 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T03:47:14,057 INFO [RS:1;71d8d2d6408d:32933 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T03:47:14,057 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T03:47:14,058 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T03:47:14,059 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T03:47:14,059 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,059 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,060 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,060 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:47:14,060 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:47:14,060 DEBUG [RS:1;71d8d2d6408d:32933 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:47:14,062 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T03:47:14,062 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:14,062 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:14,062 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:14,062 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:14,062 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,32933,1732420033969-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:47:14,082 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T03:47:14,082 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,32933,1732420033969-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:14,082 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:14,083 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.Replication(171): 71d8d2d6408d,32933,1732420033969 started 2024-11-24T03:47:14,100 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:47:14,100 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(1482): Serving as 71d8d2d6408d,32933,1732420033969, RpcServer on 71d8d2d6408d/172.17.0.2:32933, sessionid=0x1016c3d6adf0002 2024-11-24T03:47:14,100 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T03:47:14,100 DEBUG [RS:1;71d8d2d6408d:32933 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 71d8d2d6408d,32933,1732420033969 2024-11-24T03:47:14,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;71d8d2d6408d:32933,5,FailOnTimeoutGroup] 2024-11-24T03:47:14,100 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,32933,1732420033969' 2024-11-24T03:47:14,100 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T03:47:14,100 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-24T03:47:14,100 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T03:47:14,101 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T03:47:14,101 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T03:47:14,101 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T03:47:14,101 DEBUG [RS:1;71d8d2d6408d:32933 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
71d8d2d6408d,32933,1732420033969 2024-11-24T03:47:14,101 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,32933,1732420033969' 2024-11-24T03:47:14,101 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T03:47:14,101 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T03:47:14,102 DEBUG [RS:1;71d8d2d6408d:32933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T03:47:14,102 INFO [RS:1;71d8d2d6408d:32933 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T03:47:14,102 INFO [RS:1;71d8d2d6408d:32933 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T03:47:14,102 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 71d8d2d6408d,35963,1732420032556 2024-11-24T03:47:14,102 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@13d4e9ab 2024-11-24T03:47:14,102 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T03:47:14,104 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33528, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T03:47:14,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35963 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T03:47:14,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35963 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
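The two TableDescriptorChecker warnings above fire because the table is requested with a very small MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes), presumably so that flushes, log rolls, and splits happen quickly during the test. A sketch of the equivalent client-side call using the standard Admin API, with the two values taken from the warnings; the test's actual code may differ:

    // Sketch: create the table with the deliberately small limits the master warns about.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning above
        .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning above
        .build();
    admin.createTable(td);            // 'admin' obtained from the test's Connection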
2024-11-24T03:47:14,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35963 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:47:14,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35963 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T03:47:14,108 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T03:47:14,108 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:14,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35963 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-24T03:47:14,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T03:47:14,110 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T03:47:14,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37123 is added to blk_1073741835_1011 (size=393) 2024-11-24T03:47:14,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43649 is added to blk_1073741835_1011 (size=393) 2024-11-24T03:47:14,121 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => eb02e0befce36773ef5c04dd5b1e444f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a 2024-11-24T03:47:14,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37123 is added to blk_1073741836_1012 (size=76) 2024-11-24T03:47:14,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43649 is added to blk_1073741836_1012 (size=76) 2024-11-24T03:47:14,130 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:14,130 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing eb02e0befce36773ef5c04dd5b1e444f, disabling compactions & flushes 2024-11-24T03:47:14,130 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:14,130 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:14,130 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. after waiting 0 ms 2024-11-24T03:47:14,130 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:14,130 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:14,130 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for eb02e0befce36773ef5c04dd5b1e444f: Waiting for close lock at 1732420034130Disabling compacts and flushes for region at 1732420034130Disabling writes for close at 1732420034130Writing region close event to WAL at 1732420034130Closed at 1732420034130 2024-11-24T03:47:14,132 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T03:47:14,132 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732420034132"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732420034132"}]},"ts":"1732420034132"} 2024-11-24T03:47:14,135 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T03:47:14,137 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T03:47:14,138 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732420034137"}]},"ts":"1732420034137"} 2024-11-24T03:47:14,140 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-24T03:47:14,141 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=eb02e0befce36773ef5c04dd5b1e444f, ASSIGN}] 2024-11-24T03:47:14,143 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=eb02e0befce36773ef5c04dd5b1e444f, ASSIGN 2024-11-24T03:47:14,144 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=eb02e0befce36773ef5c04dd5b1e444f, ASSIGN; state=OFFLINE, location=71d8d2d6408d,43259,1732420032702; forceNewPlan=false, retain=false 2024-11-24T03:47:14,205 INFO [RS:1;71d8d2d6408d:32933 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C32933%2C1732420033969, suffix=, logDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969, archiveDir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs, maxLogs=32 2024-11-24T03:47:14,206 INFO [RS:1;71d8d2d6408d:32933 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C32933%2C1732420033969.1732420034206 2024-11-24T03:47:14,214 INFO [RS:1;71d8d2d6408d:32933 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 2024-11-24T03:47:14,215 DEBUG [RS:1;71d8d2d6408d:32933 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39387:39387),(127.0.0.1/127.0.0.1:41691:41691)] 2024-11-24T03:47:14,295 INFO [71d8d2d6408d:35963 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
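The WAL created for RS:1 above reports blocksize=256 MB, rollsize=128 MB and maxLogs=32. With the FSHLog provider these values normally derive from the configuration keys sketched below; the key-to-value mapping is an assumption based on the standard settings (roll size is block size times the roll multiplier), not something stated in the log itself:

    // Sketch (assumed mapping) for the "WAL configuration" entry above.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = 256 MB * 0.5 = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32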
2024-11-24T03:47:14,296 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=eb02e0befce36773ef5c04dd5b1e444f, regionState=OPENING, regionLocation=71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:14,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=eb02e0befce36773ef5c04dd5b1e444f, ASSIGN because future has completed 2024-11-24T03:47:14,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure eb02e0befce36773ef5c04dd5b1e444f, server=71d8d2d6408d,43259,1732420032702}] 2024-11-24T03:47:14,459 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:14,460 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => eb02e0befce36773ef5c04dd5b1e444f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:47:14,460 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,460 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:14,460 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,460 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,462 INFO [StoreOpener-eb02e0befce36773ef5c04dd5b1e444f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,464 INFO [StoreOpener-eb02e0befce36773ef5c04dd5b1e444f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eb02e0befce36773ef5c04dd5b1e444f columnFamilyName info 2024-11-24T03:47:14,464 DEBUG [StoreOpener-eb02e0befce36773ef5c04dd5b1e444f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:14,464 INFO [StoreOpener-eb02e0befce36773ef5c04dd5b1e444f-1 {}] regionserver.HStore(327): Store=eb02e0befce36773ef5c04dd5b1e444f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:14,464 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,465 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,466 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,466 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,466 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,468 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,471 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:47:14,472 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened eb02e0befce36773ef5c04dd5b1e444f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818649, jitterRate=0.040966615080833435}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T03:47:14,472 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:14,472 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for eb02e0befce36773ef5c04dd5b1e444f: Running coprocessor pre-open hook at 1732420034460Writing region info on filesystem at 1732420034460Initializing all the Stores at 1732420034461 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420034462 (+1 ms)Cleaning up temporary data from old regions at 1732420034466 (+4 ms)Running coprocessor post-open hooks at 1732420034472 (+6 ms)Region opened successfully at 1732420034472 2024-11-24T03:47:14,474 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f., pid=6, masterSystemTime=1732420034455 2024-11-24T03:47:14,477 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:14,477 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:14,478 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=eb02e0befce36773ef5c04dd5b1e444f, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:14,481 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure eb02e0befce36773ef5c04dd5b1e444f, server=71d8d2d6408d,43259,1732420032702 because future has completed 2024-11-24T03:47:14,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T03:47:14,486 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure eb02e0befce36773ef5c04dd5b1e444f, server=71d8d2d6408d,43259,1732420032702 in 181 msec 2024-11-24T03:47:14,488 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T03:47:14,488 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=eb02e0befce36773ef5c04dd5b1e444f, ASSIGN in 344 msec 2024-11-24T03:47:14,490 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T03:47:14,490 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732420034490"}]},"ts":"1732420034490"} 2024-11-24T03:47:14,493 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-24T03:47:14,495 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T03:47:14,498 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 390 msec 2024-11-24T03:47:14,531 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T03:47:14,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:14,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:14,553 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:14,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:17,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T03:47:17,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T03:47:17,407 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T03:47:17,408 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-24T03:47:17,408 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:47:17,408 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T03:47:17,409 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T03:47:17,409 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T03:47:19,191 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-24T03:47:19,964 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T03:47:19,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:19,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:19,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:19,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:24,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35963 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T03:47:24,172 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-24T03:47:24,172 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-24T03:47:24,175 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T03:47:24,176 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:24,188 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:24,191 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:24,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:24,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:24,192 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:47:24,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56161739{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:24,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e0d6f2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:24,298 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ea87f36{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/java.io.tmpdir/jetty-localhost-43945-hadoop-hdfs-3_4_1-tests_jar-_-any-11796401970422753700/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:24,299 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45b83ed{HTTP/1.1, (http/1.1)}{localhost:43945} 2024-11-24T03:47:24,299 INFO [Time-limited test {}] server.Server(415): Started @124034ms 2024-11-24T03:47:24,300 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:47:24,347 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:24,351 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:24,354 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:24,354 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:24,355 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:47:24,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7645500{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:24,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70c175e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:24,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@592c6d7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/java.io.tmpdir/jetty-localhost-45489-hadoop-hdfs-3_4_1-tests_jar-_-any-15059810042098508640/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:24,470 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d39bcd7{HTTP/1.1, (http/1.1)}{localhost:45489} 2024-11-24T03:47:24,470 INFO [Time-limited test {}] server.Server(415): Started @124205ms 2024-11-24T03:47:24,471 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:47:24,511 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:24,514 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:24,515 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:24,515 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:24,515 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:47:24,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c4a6f04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:24,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@300b140{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:24,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59934ee0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/java.io.tmpdir/jetty-localhost-35563-hadoop-hdfs-3_4_1-tests_jar-_-any-15763200409397118150/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:24,622 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a60480b{HTTP/1.1, (http/1.1)}{localhost:35563} 2024-11-24T03:47:24,623 INFO [Time-limited test {}] server.Server(415): Started @124358ms 2024-11-24T03:47:24,624 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:47:25,158 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:25,159 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:25,177 WARN [Thread-810 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:47:25,179 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1b61456b94007545 with lease ID 0xdc6fc54cb56e3cfd: Processing first storage report for DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a from datanode DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:25,179 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b61456b94007545 with lease ID 0xdc6fc54cb56e3cfd: from storage DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a node DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:25,179 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1b61456b94007545 with lease ID 0xdc6fc54cb56e3cfd: Processing first storage report for DS-b3be530c-b6d8-43de-89bd-66216d0cc4dc from datanode DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:25,179 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b61456b94007545 with lease ID 0xdc6fc54cb56e3cfd: from storage DS-b3be530c-b6d8-43de-89bd-66216d0cc4dc node DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:25,455 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data7/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:25,455 WARN [Thread-881 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data8/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:25,475 WARN [Thread-832 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:47:25,478 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55846e74affc3ee with lease ID 0xdc6fc54cb56e3cfe: Processing first storage report for DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9 from datanode DatanodeRegistration(127.0.0.1:41679, datanodeUuid=9e114387-e46b-4c21-95c9-bb6c47027a0a, infoPort=42767, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:25,478 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55846e74affc3ee with lease ID 0xdc6fc54cb56e3cfe: from storage DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9 node DatanodeRegistration(127.0.0.1:41679, datanodeUuid=9e114387-e46b-4c21-95c9-bb6c47027a0a, infoPort=42767, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:25,478 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55846e74affc3ee with lease ID 0xdc6fc54cb56e3cfe: Processing first storage report for DS-b16d3206-74dc-42ff-a37b-30ed5903f243 from datanode DatanodeRegistration(127.0.0.1:41679, datanodeUuid=9e114387-e46b-4c21-95c9-bb6c47027a0a, infoPort=42767, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:25,478 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55846e74affc3ee with lease ID 0xdc6fc54cb56e3cfe: from storage DS-b16d3206-74dc-42ff-a37b-30ed5903f243 node DatanodeRegistration(127.0.0.1:41679, datanodeUuid=9e114387-e46b-4c21-95c9-bb6c47027a0a, infoPort=42767, infoSecurePort=0, ipcPort=43769, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:25,556 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data9/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:25,556 WARN [Thread-892 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data10/current/BP-258016331-172.17.0.2-1732420030834/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:25,575 WARN [Thread-854 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:47:25,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe82b7ee1970fbdf1 with lease ID 0xdc6fc54cb56e3cff: Processing first storage report for DS-bf5923c8-b3dd-480e-91f0-7938e60612c9 from datanode DatanodeRegistration(127.0.0.1:33983, datanodeUuid=c1297a4f-5734-4284-b4e6-8ad7a4cf8c73, infoPort=40579, infoSecurePort=0, ipcPort=35567, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:25,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe82b7ee1970fbdf1 with lease ID 0xdc6fc54cb56e3cff: from storage DS-bf5923c8-b3dd-480e-91f0-7938e60612c9 node DatanodeRegistration(127.0.0.1:33983, datanodeUuid=c1297a4f-5734-4284-b4e6-8ad7a4cf8c73, infoPort=40579, infoSecurePort=0, ipcPort=35567, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:25,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe82b7ee1970fbdf1 with lease ID 0xdc6fc54cb56e3cff: Processing first storage report for DS-4a03a4f0-72cc-4ce2-96bd-fe0d88c731df from datanode DatanodeRegistration(127.0.0.1:33983, datanodeUuid=c1297a4f-5734-4284-b4e6-8ad7a4cf8c73, infoPort=40579, infoSecurePort=0, ipcPort=35567, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834) 2024-11-24T03:47:25,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe82b7ee1970fbdf1 with lease ID 0xdc6fc54cb56e3cff: from storage DS-4a03a4f0-72cc-4ce2-96bd-fe0d88c731df node DatanodeRegistration(127.0.0.1:33983, datanodeUuid=c1297a4f-5734-4284-b4e6-8ad7a4cf8c73, infoPort=40579, infoSecurePort=0, ipcPort=35567, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:25,664 WARN [ResponseProcessor for block BP-258016331-172.17.0.2-1732420030834:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-258016331-172.17.0.2-1732420030834:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,664 WARN [ResponseProcessor for block BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:25,664 WARN [ResponseProcessor for block BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,664 WARN [ResponseProcessor for block BP-258016331-172.17.0.2-1732420030834:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-258016331-172.17.0.2-1732420030834:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,665 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta block BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK], DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:25,665 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420032854 block BP-258016331-172.17.0.2-1732420030834:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK], DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:25,664 WARN [PacketResponder: BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37123] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,665 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 block BP-258016331-172.17.0.2-1732420030834:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK], DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:25,665 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 block BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK], DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:25,665 WARN [PacketResponder: BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37123] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] 
at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,666 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:60190 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43649:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60190 dst: /127.0.0.1:43649 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,666 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-73102589_22 at /127.0.0.1:60208 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43649:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60208 dst: /127.0.0.1:43649 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,666 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1274043301_22 at /127.0.0.1:45088 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45088 dst: /127.0.0.1:37123 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,666 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:45128 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45128 dst: /127.0.0.1:37123 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@19a6c3ca{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:25,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-73102589_22 at /127.0.0.1:45156 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45156 dst: /127.0.0.1:37123 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1274043301_22 at /127.0.0.1:60144 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43649:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60144 dst: /127.0.0.1:43649 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:45112 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45112 dst: /127.0.0.1:37123 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:60184 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43649:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60184 dst: /127.0.0.1:43649 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T03:47:25,671 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e259212{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:25,671 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:25,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@630b1d87{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:25,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f400306{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:25,672 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:47:25,672 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T03:47:25,672 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-258016331-172.17.0.2-1732420030834 (Datanode Uuid df14d960-b4ba-43c0-a068-d20eb718348c) service to localhost/127.0.0.1:44445 2024-11-24T03:47:25,672 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:47:25,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data3/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:25,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data4/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:25,673 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:25,678 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta block BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,678 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 block BP-258016331-172.17.0.2-1732420030834:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,679 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@19a9625 {}] datanode.DataXceiver(331): 127.0.0.1:43649:DataXceiver error processing unknown operation src: /127.0.0.1:41290 dst: /127.0.0.1:43649 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:25,679 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 block BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,680 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420032854 block BP-258016331-172.17.0.2-1732420030834:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:25,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1705bda4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:25,681 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2cbd3715{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:25,681 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:25,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@149f32b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:25,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@357bf795{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:25,682 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:47:25,682 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T03:47:25,683 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:47:25,683 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-258016331-172.17.0.2-1732420030834 (Datanode Uuid 6f11d9ea-ea26-4051-9898-33a42eacee43) service to localhost/127.0.0.1:44445 2024-11-24T03:47:25,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data1/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:25,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data2/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:25,684 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:25,688 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f., hostname=71d8d2d6408d,43259,1732420032702, seqNum=2] 2024-11-24T03:47:25,690 ERROR [FSHLog-0-hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a-prefix:71d8d2d6408d,43259,1732420032702 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,690 WARN [FSHLog-0-hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a-prefix:71d8d2d6408d,43259,1732420032702 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,690 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C43259%2C1732420032702:(num 1732420033345) roll requested 2024-11-24T03:47:25,691 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C43259%2C1732420032702.1732420045690 2024-11-24T03:47:25,694 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,694 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK], DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 
2024-11-24T03:47:25,694 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741838_1018 2024-11-24T03:47:25,697 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] 2024-11-24T03:47:25,703 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:25,703 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:25,703 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:25,703 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:25,703 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:25,703 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420045690 2024-11-24T03:47:25,704 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:25,704 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:25,705 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-24T03:47:25,705 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-24T03:47:25,705 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 2024-11-24T03:47:25,706 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42767:42767),(127.0.0.1/127.0.0.1:40579:40579)] 2024-11-24T03:47:25,707 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 is not closed yet, will try archiving it next time 2024-11-24T03:47:25,708 WARN [IPC Server handler 1 on default port 44445 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-24T03:47:25,712 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 after 5ms 2024-11-24T03:47:26,061 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:26,825 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:27,707 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:27,708 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420045690 2024-11-24T03:47:27,709 WARN [ResponseProcessor for block BP-258016331-172.17.0.2-1732420030834:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-258016331-172.17.0.2-1732420030834:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:27,710 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420045690 block BP-258016331-172.17.0.2-1732420030834:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK], DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]) is bad. 2024-11-24T03:47:27,710 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:42252 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:41679:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42252 dst: /127.0.0.1:41679 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:27,710 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:59608 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:33983:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59608 dst: /127.0.0.1:33983 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T03:47:27,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@592c6d7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:27,712 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d39bcd7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:27,712 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:27,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70c175e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:27,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7645500{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:27,713 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:47:27,713 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T03:47:27,713 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:47:27,713 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-258016331-172.17.0.2-1732420030834 (Datanode Uuid 9e114387-e46b-4c21-95c9-bb6c47027a0a) service to localhost/127.0.0.1:44445 2024-11-24T03:47:27,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data7/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:27,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data8/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:27,714 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:28,061 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:28,825 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:29,707 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:29,708 WARN [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]] 2024-11-24T03:47:29,708 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C43259%2C1732420032702:(num 1732420045690) roll requested 2024-11-24T03:47:29,709 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C43259%2C1732420032702.1732420049708 2024-11-24T03:47:29,713 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 after 4008ms 2024-11-24T03:47:29,713 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:29,713 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK], DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:29,713 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741840_1022 2024-11-24T03:47:29,714 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] 2024-11-24T03:47:29,718 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43649 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:29,718 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35934 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741841_1023 to mirror 127.0.0.1:43649 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:29,718 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]) is bad. 2024-11-24T03:47:29,718 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741841_1023 2024-11-24T03:47:29,718 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35934 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T03:47:29,718 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35934 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35934 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:29,719 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T03:47:29,719 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK] 2024-11-24T03:47:29,722 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41679 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:29,722 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35950 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741842_1024 to mirror 127.0.0.1:41679 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:29,722 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]) is bad. 2024-11-24T03:47:29,722 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741842_1024 2024-11-24T03:47:29,722 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35950 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-24T03:47:29,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35950 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35950 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:29,723 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK] 2024-11-24T03:47:29,734 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:29,734 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:29,734 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:29,734 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:29,734 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:29,734 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420045690 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420049708 2024-11-24T03:47:29,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33983 is added to blk_1073741839_1021 (size=3600) 2024-11-24T03:47:29,739 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41305:41305),(127.0.0.1/127.0.0.1:40579:40579)] 2024-11-24T03:47:29,739 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 is not closed yet, will try archiving it next time 2024-11-24T03:47:29,739 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420045690 is not closed yet, will try archiving it next time 2024-11-24T03:47:30,062 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:30,138 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 is not closed yet, will try archiving it next time 2024-11-24T03:47:30,825 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:31,723 WARN [ResponseProcessor for block BP-258016331-172.17.0.2-1732420030834:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-258016331-172.17.0.2-1732420030834:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-258016331-172.17.0.2-1732420030834:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:31,723 WARN [DataStreamer for file /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420049708 block BP-258016331-172.17.0.2-1732420030834:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:31,723 WARN [PacketResponder: BP-258016331-172.17.0.2-1732420030834:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33983] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:31,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35952 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35952 dst: /127.0.0.1:35875 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:31,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:59620 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:33983:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59620 dst: /127.0.0.1:33983 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:31,739 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:31,739 WARN [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]] 2024-11-24T03:47:31,739 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C43259%2C1732420032702:(num 1732420049708) roll requested 2024-11-24T03:47:31,740 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C43259%2C1732420032702.1732420051740 2024-11-24T03:47:31,743 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:31,743 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK], DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]) is bad. 2024-11-24T03:47:31,743 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741844_1027 2024-11-24T03:47:31,744 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK] 2024-11-24T03:47:31,745 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:31,745 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK], DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]) is bad. 2024-11-24T03:47:31,745 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741845_1028 2024-11-24T03:47:31,746 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK] 2024-11-24T03:47:31,748 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:31,748 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK], DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:31,748 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741846_1029 2024-11-24T03:47:31,748 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:31,751 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37123 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:31,751 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35968 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741847_1030 to mirror 127.0.0.1:37123 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:31,751 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:31,751 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35968 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T03:47:31,751 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741847_1030 2024-11-24T03:47:31,751 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35968 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35968 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:31,752 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] 2024-11-24T03:47:31,753 WARN [IPC Server handler 4 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T03:47:31,753 WARN [IPC Server handler 4 on default port 44445 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T03:47:31,753 WARN [IPC Server handler 4 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T03:47:31,756 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:31,756 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:31,756 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:31,756 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:31,756 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:31,756 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420049708 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420051740 2024-11-24T03:47:31,757 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41305:41305)] 2024-11-24T03:47:31,757 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 is not closed yet, will try archiving it next time 
2024-11-24T03:47:31,757 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420049708 is not closed yet, will try archiving it next time 2024-11-24T03:47:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741843_1026 (size=93) 2024-11-24T03:47:31,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59934ee0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:31,768 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a60480b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:31,768 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:31,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@300b140{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:31,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c4a6f04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:31,770 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:47:31,770 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:47:31,770 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-258016331-172.17.0.2-1732420030834 (Datanode Uuid c1297a4f-5734-4284-b4e6-8ad7a4cf8c73) service to localhost/127.0.0.1:44445 2024-11-24T03:47:31,770 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:47:31,770 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data9/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:31,770 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data10/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:31,771 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:31,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43259 {}] regionserver.HRegion(8855): Flush requested on eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:31,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb02e0befce36773ef5c04dd5b1e444f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:47:31,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/deb09140e7534bdd98830e669a127fe4 is 1080, key is row0002/info:/1732420047715/Put/seqid=0 2024-11-24T03:47:31,801 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:31,801 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK], DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:31,801 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741849_1032 2024-11-24T03:47:31,801 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] 2024-11-24T03:47:31,803 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43649 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:31,803 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35996 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741850_1033 to mirror 127.0.0.1:43649 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T03:47:31,804 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]) is bad. 2024-11-24T03:47:31,804 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741850_1033 2024-11-24T03:47:31,804 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35996 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T03:47:31,804 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:35996 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35996 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:31,804 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK] 2024-11-24T03:47:31,805 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:31,806 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:31,806 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741851_1034 2024-11-24T03:47:31,806 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:31,807 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:31,807 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]) is bad. 
2024-11-24T03:47:31,807 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741852_1035 2024-11-24T03:47:31,808 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK] 2024-11-24T03:47:31,808 WARN [IPC Server handler 0 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T03:47:31,808 WARN [IPC Server handler 0 on default port 44445 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T03:47:31,808 WARN [IPC Server handler 0 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T03:47:31,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741853_1036 (size=10347) 2024-11-24T03:47:32,062 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:32,161 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 is not closed yet, will try archiving it next time 2024-11-24T03:47:32,162 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420049708 to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs/71d8d2d6408d%2C43259%2C1732420032702.1732420049708 2024-11-24T03:47:32,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/deb09140e7534bdd98830e669a127fe4 2024-11-24T03:47:32,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/deb09140e7534bdd98830e669a127fe4 as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/deb09140e7534bdd98830e669a127fe4 2024-11-24T03:47:32,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/deb09140e7534bdd98830e669a127fe4, entries=5, sequenceid=11, filesize=10.1 K 2024-11-24T03:47:32,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for eb02e0befce36773ef5c04dd5b1e444f in 449ms, sequenceid=11, compaction requested=false 2024-11-24T03:47:32,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb02e0befce36773ef5c04dd5b1e444f: 2024-11-24T03:47:32,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43259 {}] regionserver.HRegion(8855): Flush requested on eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:32,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb02e0befce36773ef5c04dd5b1e444f 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-24T03:47:32,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/1be3f47689ee4147b3b3465e3c866103 is 1080, key is row0007/info:/1732420051781/Put/seqid=0 2024-11-24T03:47:32,414 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:32,414 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK], DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:32,414 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741854_1037 2024-11-24T03:47:32,415 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:32,416 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:32,417 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]) is bad. 
2024-11-24T03:47:32,417 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741855_1038 2024-11-24T03:47:32,417 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK] 2024-11-24T03:47:32,420 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43649 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:32,419 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37678 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741856_1039 to mirror 127.0.0.1:43649 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:32,420 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]) is bad. 
2024-11-24T03:47:32,420 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37678 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T03:47:32,420 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741856_1039 2024-11-24T03:47:32,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37678 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37678 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:32,421 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK] 2024-11-24T03:47:32,423 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37123 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:32,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37690 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741857_1040 to mirror 127.0.0.1:37123 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:32,423 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:32,423 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37690 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T03:47:32,423 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741857_1040 2024-11-24T03:47:32,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37690 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37690 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:32,424 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] 2024-11-24T03:47:32,424 WARN [IPC Server handler 4 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T03:47:32,425 WARN [IPC Server handler 4 on default port 44445 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T03:47:32,425 WARN [IPC Server handler 4 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T03:47:32,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741858_1041 (size=12506) 2024-11-24T03:47:32,826 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:32,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/1be3f47689ee4147b3b3465e3c866103 2024-11-24T03:47:32,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/1be3f47689ee4147b3b3465e3c866103 as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1be3f47689ee4147b3b3465e3c866103 2024-11-24T03:47:32,842 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1be3f47689ee4147b3b3465e3c866103, entries=7, sequenceid=24, filesize=12.2 K 2024-11-24T03:47:32,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for eb02e0befce36773ef5c04dd5b1e444f in 437ms, sequenceid=24, compaction requested=false 2024-11-24T03:47:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb02e0befce36773ef5c04dd5b1e444f: 2024-11-24T03:47:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-24T03:47:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:32,844 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1be3f47689ee4147b3b3465e3c866103 because midkey is the same as first or last row 2024-11-24T03:47:33,194 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3c6830e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834):Failed to transfer BP-258016331-172.17.0.2-1732420030834:blk_1073741853_1036 to 127.0.0.1:43649 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:33,194 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68636a5c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834):Failed to transfer BP-258016331-172.17.0.2-1732420030834:blk_1073741843_1026 to 127.0.0.1:37123 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:33,758 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:33,758 WARN [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]] 2024-11-24T03:47:33,758 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C43259%2C1732420032702:(num 1732420051740) roll requested 2024-11-24T03:47:33,758 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C43259%2C1732420032702.1732420053758 2024-11-24T03:47:33,763 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37123 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:33,762 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37698 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741859_1042 to mirror 127.0.0.1:37123 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:33,763 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:33,763 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741859_1042 2024-11-24T03:47:33,763 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37698 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T03:47:33,763 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37698 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37698 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:33,763 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] 2024-11-24T03:47:33,766 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37704 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741860_1043 to mirror 127.0.0.1:43649 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:33,766 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37704 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-24T03:47:33,766 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37704 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37704 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:33,766 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43649 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:33,767 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]) is bad. 2024-11-24T03:47:33,767 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741860_1043 2024-11-24T03:47:33,767 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK] 2024-11-24T03:47:33,770 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41679 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:33,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37720 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741861_1044 to mirror 127.0.0.1:41679 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:33,770 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]) is bad. 2024-11-24T03:47:33,770 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37720 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T03:47:33,770 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741861_1044 2024-11-24T03:47:33,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:37720 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37720 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:33,771 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK] 2024-11-24T03:47:33,773 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:33,773 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 
2024-11-24T03:47:33,773 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741862_1045 2024-11-24T03:47:33,774 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:33,774 WARN [IPC Server handler 3 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T03:47:33,775 WARN [IPC Server handler 3 on default port 44445 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T03:47:33,775 WARN [IPC Server handler 3 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T03:47:33,777 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:33,777 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:33,777 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:33,778 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:33,778 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:33,778 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420051740 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420053758 2024-11-24T03:47:33,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741848_1031 (size=24823) 2024-11-24T03:47:33,784 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41305:41305)] 2024-11-24T03:47:33,784 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 is not closed yet, will try archiving it next time 2024-11-24T03:47:33,784 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420051740 is not closed yet, 
will try archiving it next time 2024-11-24T03:47:33,784 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420045690 to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs/71d8d2d6408d%2C43259%2C1732420032702.1732420045690 2024-11-24T03:47:33,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43259 {}] regionserver.HRegion(8855): Flush requested on eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:33,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb02e0befce36773ef5c04dd5b1e444f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T03:47:33,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/772a508ab55a4f65a1c7ae06173ffcac is 1079, key is tmprow/info:/1732420053825/Put/seqid=0 2024-11-24T03:47:33,833 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:33,833 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK], DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:33,833 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741864_1047 2024-11-24T03:47:33,834 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:33,835 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:33,836 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:33,836 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741865_1048 2024-11-24T03:47:33,836 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] 2024-11-24T03:47:33,838 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:33,838 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK], DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]) is bad. 2024-11-24T03:47:33,838 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741866_1049 2024-11-24T03:47:33,839 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK] 2024-11-24T03:47:33,840 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:33,841 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]) is bad. 2024-11-24T03:47:33,841 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741867_1050 2024-11-24T03:47:33,841 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK] 2024-11-24T03:47:33,842 WARN [IPC Server handler 1 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T03:47:33,842 WARN [IPC Server handler 1 on default port 44445 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T03:47:33,842 WARN [IPC Server handler 1 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T03:47:33,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741868_1051 (size=6027) 2024-11-24T03:47:34,063 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:34,181 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 is not closed yet, will try archiving it next time 2024-11-24T03:47:34,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/772a508ab55a4f65a1c7ae06173ffcac 2024-11-24T03:47:34,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/772a508ab55a4f65a1c7ae06173ffcac as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/772a508ab55a4f65a1c7ae06173ffcac 2024-11-24T03:47:34,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/772a508ab55a4f65a1c7ae06173ffcac, entries=1, sequenceid=34, filesize=5.9 K 2024-11-24T03:47:34,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for eb02e0befce36773ef5c04dd5b1e444f in 441ms, sequenceid=34, compaction requested=true 2024-11-24T03:47:34,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb02e0befce36773ef5c04dd5b1e444f: 2024-11-24T03:47:34,267 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-24T03:47:34,267 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:34,267 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1be3f47689ee4147b3b3465e3c866103 because midkey is the same as first or last row 2024-11-24T03:47:34,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb02e0befce36773ef5c04dd5b1e444f:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:47:34,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:47:34,268 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:47:34,269 DEBUG 
[RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:47:34,269 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HStore(1541): eb02e0befce36773ef5c04dd5b1e444f/info is initiating minor compaction (all files) 2024-11-24T03:47:34,269 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb02e0befce36773ef5c04dd5b1e444f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:34,270 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/deb09140e7534bdd98830e669a127fe4, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1be3f47689ee4147b3b3465e3c866103, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/772a508ab55a4f65a1c7ae06173ffcac] into tmpdir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp, totalSize=28.2 K 2024-11-24T03:47:34,270 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.Compactor(225): Compacting deb09140e7534bdd98830e669a127fe4, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732420047715 2024-11-24T03:47:34,271 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1be3f47689ee4147b3b3465e3c866103, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732420051781 2024-11-24T03:47:34,271 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.Compactor(225): Compacting 772a508ab55a4f65a1c7ae06173ffcac, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732420053825 2024-11-24T03:47:34,286 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb02e0befce36773ef5c04dd5b1e444f#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:47:34,286 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/a220abdc0bc94950940c110ea60c352b is 1080, key is row0002/info:/1732420047715/Put/seqid=0 2024-11-24T03:47:34,288 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:34,288 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]) is bad. 2024-11-24T03:47:34,288 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741869_1052 2024-11-24T03:47:34,288 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK] 2024-11-24T03:47:34,289 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:34,290 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK], DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:34,290 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741870_1053 2024-11-24T03:47:34,290 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:34,291 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:34,291 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:34,291 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741871_1054 2024-11-24T03:47:34,292 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] 2024-11-24T03:47:34,293 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:34,293 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]) is bad. 
2024-11-24T03:47:34,293 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741872_1055 2024-11-24T03:47:34,294 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK] 2024-11-24T03:47:34,295 WARN [IPC Server handler 3 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T03:47:34,295 WARN [IPC Server handler 3 on default port 44445 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T03:47:34,295 WARN [IPC Server handler 3 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T03:47:34,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741873_1056 (size=17994) 2024-11-24T03:47:34,305 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/a220abdc0bc94950940c110ea60c352b as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b 2024-11-24T03:47:34,312 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eb02e0befce36773ef5c04dd5b1e444f/info of eb02e0befce36773ef5c04dd5b1e444f into a220abdc0bc94950940c110ea60c352b(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb02e0befce36773ef5c04dd5b1e444f: 2024-11-24T03:47:34,313 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f., storeName=eb02e0befce36773ef5c04dd5b1e444f/info, priority=13, startTime=1732420054267; duration=0sec 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b because midkey is the same as first or last row 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b because midkey is the same as first or last row 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b because midkey is the same as first or last row 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:47:34,313 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb02e0befce36773ef5c04dd5b1e444f:info 2024-11-24T03:47:34,826 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:35,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43259 {}] regionserver.HRegion(8855): Flush requested on eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:35,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb02e0befce36773ef5c04dd5b1e444f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T03:47:35,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/c8550ecae806470fb53faf2b16aa27d9 is 1079, key is tmprow/info:/1732420055246/Put/seqid=0 2024-11-24T03:47:35,257 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:35,257 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK], DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:35,257 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741874_1057 2024-11-24T03:47:35,258 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:35,259 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:35,260 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK], DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]) is bad. 2024-11-24T03:47:35,260 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741875_1058 2024-11-24T03:47:35,260 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37123,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK] 2024-11-24T03:47:35,262 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:35,262 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK]) is bad. 2024-11-24T03:47:35,262 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741876_1059 2024-11-24T03:47:35,263 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41679,DS-4ee8d3a7-d63f-43fb-95bc-58c677d4eec9,DISK] 2024-11-24T03:47:35,265 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:35,265 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]) is bad. 2024-11-24T03:47:35,265 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741877_1060 2024-11-24T03:47:35,266 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK] 2024-11-24T03:47:35,267 WARN [IPC Server handler 0 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T03:47:35,267 WARN [IPC Server handler 0 on default port 44445 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T03:47:35,267 WARN [IPC Server handler 0 on default port 44445 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T03:47:35,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741878_1061 (size=6027) 2024-11-24T03:47:35,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/c8550ecae806470fb53faf2b16aa27d9 2024-11-24T03:47:35,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/c8550ecae806470fb53faf2b16aa27d9 as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/c8550ecae806470fb53faf2b16aa27d9 2024-11-24T03:47:35,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/c8550ecae806470fb53faf2b16aa27d9, entries=1, sequenceid=45, filesize=5.9 K 2024-11-24T03:47:35,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for eb02e0befce36773ef5c04dd5b1e444f in 445ms, sequenceid=45, compaction requested=false 2024-11-24T03:47:35,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb02e0befce36773ef5c04dd5b1e444f: 2024-11-24T03:47:35,692 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-24T03:47:35,692 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:35,692 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b because midkey is the same as first or last row 2024-11-24T03:47:35,784 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:35,784 WARN [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-24T03:47:35,867 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:35,872 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:35,875 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:35,875 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:35,875 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:47:35,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b1cb5ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:35,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54643be5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:35,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@239bd5a6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/java.io.tmpdir/jetty-localhost-45725-hadoop-hdfs-3_4_1-tests_jar-_-any-6497407732227286203/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:35,975 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15efdeac{HTTP/1.1, (http/1.1)}{localhost:45725} 2024-11-24T03:47:35,975 INFO [Time-limited test {}] server.Server(415): Started @135710ms 2024-11-24T03:47:35,977 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:47:36,063 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:36,183 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3c6830e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834):Failed to transfer BP-258016331-172.17.0.2-1732420030834:blk_1073741848_1031 to 127.0.0.1:37123 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:36,183 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68636a5c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834):Failed to transfer BP-258016331-172.17.0.2-1732420030834:blk_1073741858_1041 to 127.0.0.1:33983 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:36,303 WARN [Thread-979 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:47:36,314 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xce467fa82a4f3db6 with lease ID 0xdc6fc54cb56e3d00: from storage DS-49d5c796-666a-4b4b-842e-12c9638ede79 node DatanodeRegistration(127.0.0.1:46357, datanodeUuid=df14d960-b4ba-43c0-a068-d20eb718348c, infoPort=42601, infoSecurePort=0, ipcPort=33491, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:36,314 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xce467fa82a4f3db6 with lease ID 0xdc6fc54cb56e3d00: from storage DS-c76f9968-7682-4736-a01d-b023e3bf8f35 node DatanodeRegistration(127.0.0.1:46357, datanodeUuid=df14d960-b4ba-43c0-a068-d20eb718348c, infoPort=42601, infoSecurePort=0, ipcPort=33491, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:36,826 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:37,182 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68636a5c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834):Failed to transfer BP-258016331-172.17.0.2-1732420030834:blk_1073741873_1056 to 127.0.0.1:43649 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T03:47:37,182 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3c6830e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834):Failed to transfer BP-258016331-172.17.0.2-1732420030834:blk_1073741868_1051 to 127.0.0.1:41679 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:37,785 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:38,063 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:38,827 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:39,183 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3c6830e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35875, datanodeUuid=17aeb76d-2b9c-4be7-9637-d6f2a7efeb84, infoPort=41305, infoSecurePort=0, ipcPort=39195, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834):Failed to transfer BP-258016331-172.17.0.2-1732420030834:blk_1073741878_1061 to 127.0.0.1:41679 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:39,785 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:40,064 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:40,828 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:41,786 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:42,064 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:42,534 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T03:47:42,828 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:43,072 ERROR [FSHLog-0-hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData-prefix:71d8d2d6408d,35963,1732420032556 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:43,072 WARN [FSHLog-0-hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData-prefix:71d8d2d6408d,35963,1732420032556 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:43,072 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C35963%2C1732420032556:(num 1732420032854) roll requested 2024-11-24T03:47:43,073 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C35963%2C1732420032556.1732420063072 2024-11-24T03:47:43,076 WARN [Thread-999 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:43,076 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1274043301_22 at /127.0.0.1:52422 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741879_1062 to mirror 127.0.0.1:33983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:43,077 WARN [Thread-999 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:43,077 WARN [Thread-999 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741879_1062 2024-11-24T03:47:43,077 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1274043301_22 at /127.0.0.1:52422 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T03:47:43,077 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1274043301_22 at /127.0.0.1:52422 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52422 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:43,077 WARN [Thread-999 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:43,082 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:43,082 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:43,082 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:43,082 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:43,082 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:43,082 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420032854 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420063072 2024-11-24T03:47:43,083 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:43,083 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:43,083 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420032854 2024-11-24T03:47:43,083 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42601:42601),(127.0.0.1/127.0.0.1:41305:41305)] 2024-11-24T03:47:43,083 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420032854 is not closed yet, will try archiving it next time 2024-11-24T03:47:43,083 WARN [IPC Server handler 1 on default port 44445 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420032854 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-11-24T03:47:43,084 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420032854 after 1ms 2024-11-24T03:47:43,786 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:44,065 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:45,786 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:46,065 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:46,335 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@480c7f54 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-258016331-172.17.0.2-1732420030834:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:43649,null,null]) java.net.ConnectException: Call From 71d8d2d6408d/172.17.0.2 to localhost:34843 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T03:47:46,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741833_1020 (size=455) 2024-11-24T03:47:46,731 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs/71d8d2d6408d%2C43259%2C1732420032702.1732420033345 2024-11-24T03:47:46,733 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420051740 to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs/71d8d2d6408d%2C43259%2C1732420032702.1732420051740 2024-11-24T03:47:47,085 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/WALs/71d8d2d6408d,35963,1732420032556/71d8d2d6408d%2C35963%2C1732420032556.1732420032854 after 4002ms 2024-11-24T03:47:47,787 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:48,066 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:48,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741833_1020 (size=455) 2024-11-24T03:47:49,788 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:50,066 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:51,661 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C43259%2C1732420032702.1732420071660 2024-11-24T03:47:51,667 WARN [Thread-1011 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:51,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1274043301_22 at /127.0.0.1:52434 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741881_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741881_1065 to mirror 127.0.0.1:33983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:51,667 WARN [Thread-1011 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:51,667 WARN [Thread-1011 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741881_1065 2024-11-24T03:47:51,667 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1274043301_22 at /127.0.0.1:52434 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741881_1065] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-24T03:47:51,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1274043301_22 at /127.0.0.1:52434 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741881_1065] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52434 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:51,667 WARN [Thread-1011 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:51,671 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,671 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,672 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,672 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,672 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,672 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420053758 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420071660 2024-11-24T03:47:51,673 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41305:41305),(127.0.0.1/127.0.0.1:42601:42601)] 2024-11-24T03:47:51,673 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420053758 is not closed yet, will try archiving it next time 2024-11-24T03:47:51,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741863_1046 (size=13591) 2024-11-24T03:47:51,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43259 {}] regionserver.HRegion(8855): Flush requested on eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:51,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb02e0befce36773ef5c04dd5b1e444f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T03:47:51,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/163bc1802760495faa3b7ff2c9cbb342 is 1080, key is row0013/info:/1732420071674/Put/seqid=0 2024-11-24T03:47:51,693 WARN [Thread-1018 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:51,693 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:57718 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741883_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data4]'}, localName='127.0.0.1:46357', datanodeUuid='df14d960-b4ba-43c0-a068-d20eb718348c', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741883_1067 to mirror 127.0.0.1:33983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:51,693 WARN [Thread-1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46357,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK], DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 
2024-11-24T03:47:51,693 WARN [Thread-1018 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741883_1067 2024-11-24T03:47:51,693 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:57718 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741883_1067] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T03:47:51,693 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:57718 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741883_1067] {}] datanode.DataXceiver(331): 127.0.0.1:46357:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57718 dst: /127.0.0.1:46357 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T03:47:51,694 WARN [Thread-1018 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:51,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741884_1068 (size=11421) 2024-11-24T03:47:51,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741884_1068 (size=11421) 2024-11-24T03:47:51,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/163bc1802760495faa3b7ff2c9cbb342 2024-11-24T03:47:51,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/163bc1802760495faa3b7ff2c9cbb342 as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/163bc1802760495faa3b7ff2c9cbb342 2024-11-24T03:47:51,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/163bc1802760495faa3b7ff2c9cbb342, entries=6, sequenceid=55, filesize=11.2 K 2024-11-24T03:47:51,715 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for eb02e0befce36773ef5c04dd5b1e444f in 32ms, sequenceid=55, compaction requested=true 2024-11-24T03:47:51,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb02e0befce36773ef5c04dd5b1e444f: 2024-11-24T03:47:51,715 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-24T03:47:51,715 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:51,715 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b because midkey is the same as first or last row 2024-11-24T03:47:51,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb02e0befce36773ef5c04dd5b1e444f:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:47:51,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:47:51,715 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:47:51,716 DEBUG 
[RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:47:51,717 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HStore(1541): eb02e0befce36773ef5c04dd5b1e444f/info is initiating minor compaction (all files) 2024-11-24T03:47:51,717 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb02e0befce36773ef5c04dd5b1e444f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:51,717 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/c8550ecae806470fb53faf2b16aa27d9, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/163bc1802760495faa3b7ff2c9cbb342] into tmpdir=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp, totalSize=34.6 K 2024-11-24T03:47:51,717 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.Compactor(225): Compacting a220abdc0bc94950940c110ea60c352b, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732420047715 2024-11-24T03:47:51,718 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.Compactor(225): Compacting c8550ecae806470fb53faf2b16aa27d9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732420055246 2024-11-24T03:47:51,718 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] compactions.Compactor(225): Compacting 163bc1802760495faa3b7ff2c9cbb342, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732420055652 2024-11-24T03:47:51,733 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb02e0befce36773ef5c04dd5b1e444f#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:47:51,734 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/1a85e19d48a948aaae302f6549503b5c is 1080, key is row0002/info:/1732420047715/Put/seqid=0 2024-11-24T03:47:51,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741885_1069 (size=23502) 2024-11-24T03:47:51,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741885_1069 (size=23502) 2024-11-24T03:47:51,745 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/1a85e19d48a948aaae302f6549503b5c as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1a85e19d48a948aaae302f6549503b5c 2024-11-24T03:47:51,752 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eb02e0befce36773ef5c04dd5b1e444f/info of eb02e0befce36773ef5c04dd5b1e444f into 1a85e19d48a948aaae302f6549503b5c(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb02e0befce36773ef5c04dd5b1e444f: 2024-11-24T03:47:51,752 INFO [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f., storeName=eb02e0befce36773ef5c04dd5b1e444f/info, priority=13, startTime=1732420071715; duration=0sec 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1a85e19d48a948aaae302f6549503b5c because midkey is the same as first or last row 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1a85e19d48a948aaae302f6549503b5c because midkey is the same as first or last row 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:47:51,752 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1a85e19d48a948aaae302f6549503b5c because midkey is the same as first or last row 2024-11-24T03:47:51,753 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:47:51,753 DEBUG [RS:0;71d8d2d6408d:43259-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb02e0befce36773ef5c04dd5b1e444f:info 2024-11-24T03:47:51,788 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:51,788 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-24T03:47:51,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T03:47:51,900 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T03:47:51,900 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-24T03:47:51,901 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-24T03:47:51,901 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-24T03:47:51,901 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-24T03:47:51,902 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-24T03:47:51,902 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=821688174, stopped=false
2024-11-24T03:47:51,902 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=71d8d2d6408d,35963,1732420032556
2024-11-24T03:47:51,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-24T03:47:51,967 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-24T03:47:51,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-24T03:47:51,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T03:47:51,967 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T03:47:51,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T03:47:51,967 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-24T03:47:51,969 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T03:47:51,969 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T03:47:51,970 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T03:47:51,970 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-24T03:47:51,970 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:47:51,970 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:51,970 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '71d8d2d6408d,43259,1732420032702' ***** 2024-11-24T03:47:51,971 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T03:47:51,971 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '71d8d2d6408d,32933,1732420033969' ***** 2024-11-24T03:47:51,971 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T03:47:51,971 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T03:47:51,971 INFO [RS:0;71d8d2d6408d:43259 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T03:47:51,971 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T03:47:51,971 INFO [RS:0;71d8d2d6408d:43259 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T03:47:51,972 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(3091): Received CLOSE for eb02e0befce36773ef5c04dd5b1e444f 2024-11-24T03:47:51,972 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T03:47:51,972 INFO [RS:1;71d8d2d6408d:32933 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T03:47:51,972 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T03:47:51,972 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(959): stopping server 71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:51,972 INFO [RS:1;71d8d2d6408d:32933 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T03:47:51,972 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:47:51,972 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(959): stopping server 71d8d2d6408d,32933,1732420033969 2024-11-24T03:47:51,972 INFO [RS:0;71d8d2d6408d:43259 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;71d8d2d6408d:43259. 
2024-11-24T03:47:51,972 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:47:51,972 DEBUG [RS:0;71d8d2d6408d:43259 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:47:51,972 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing eb02e0befce36773ef5c04dd5b1e444f, disabling compactions & flushes 2024-11-24T03:47:51,972 INFO [RS:1;71d8d2d6408d:32933 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;71d8d2d6408d:32933. 2024-11-24T03:47:51,972 DEBUG [RS:0;71d8d2d6408d:43259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:51,972 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:51,973 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 
2024-11-24T03:47:51,973 DEBUG [RS:1;71d8d2d6408d:32933 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:47:51,973 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. after waiting 0 ms 2024-11-24T03:47:51,973 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T03:47:51,973 DEBUG [RS:1;71d8d2d6408d:32933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:51,973 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 2024-11-24T03:47:51,973 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T03:47:51,973 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T03:47:51,973 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(976): stopping server 71d8d2d6408d,32933,1732420033969; all regions closed. 
2024-11-24T03:47:51,973 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-24T03:47:51,973 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing eb02e0befce36773ef5c04dd5b1e444f 1/1 column families, dataSize=6.30 KB heapSize=7 KB
2024-11-24T03:47:51,973 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:47:51,973 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-24T03:47:51,973 DEBUG [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, eb02e0befce36773ef5c04dd5b1e444f=TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.}
2024-11-24T03:47:51,974 DEBUG [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, eb02e0befce36773ef5c04dd5b1e444f
2024-11-24T03:47:51,974 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:47:51,974 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-24T03:47:51,974 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:47:51,974 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:47:51,974 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:47:51,974 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-24T03:47:51,974 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-24T03:47:51,974 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-24T03:47:51,974 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-24T03:47:51,975 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T03:47:51,975 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB
2024-11-24T03:47:51,975 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T03:47:51,975 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:47:51,975 ERROR [FSHLog-0-hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a-prefix:71d8d2d6408d,43259,1732420032702.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T03:47:51,975 WARN [IPC Server handler 4 on default port 44445 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 has not been closed. Lease recovery is in progress. RecoveryId = 1070 for block blk_1073741837_1013
2024-11-24T03:47:51,975 WARN [FSHLog-0-hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a-prefix:71d8d2d6408d,43259,1732420032702.meta {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T03:47:51,976 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C43259%2C1732420032702.meta:.meta(num 1732420033733) roll requested 2024-11-24T03:47:51,976 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 after 1ms 2024-11-24T03:47:51,976 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C43259%2C1732420032702.meta.1732420071976.meta 2024-11-24T03:47:51,978 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/fc1289dc3b9342999f3cf3e579751e3f is 1080, key is row0018/info:/1732420071684/Put/seqid=0 2024-11-24T03:47:51,981 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:51,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:52722 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741887_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741887_1072 to mirror 127.0.0.1:33983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:51,981 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741887_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:51,981 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741887_1072 2024-11-24T03:47:51,981 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:52722 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741887_1072] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T03:47:51,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:52722 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741887_1072] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52722 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T03:47:51,982 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:51,982 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,982 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,982 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,982 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,982 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:51,982 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420071976.meta 2024-11-24T03:47:51,984 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:51,984 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43649,DS-0e85fb5f-660e-48cd-847e-21f6529e0b46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:47:51,984 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta 2024-11-24T03:47:51,985 WARN [IPC Server handler 3 on default port 44445 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1074 for block blk_1073741834_1010 2024-11-24T03:47:51,985 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta after 1ms 2024-11-24T03:47:51,985 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42601:42601),(127.0.0.1/127.0.0.1:41305:41305)] 2024-11-24T03:47:51,985 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta is not closed yet, will try archiving it next time 2024-11-24T03:47:51,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741888_1073 (size=11421) 2024-11-24T03:47:51,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741888_1073 (size=11421) 2024-11-24T03:47:51,988 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/fc1289dc3b9342999f3cf3e579751e3f 2024-11-24T03:47:51,994 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/.tmp/info/fc1289dc3b9342999f3cf3e579751e3f as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/fc1289dc3b9342999f3cf3e579751e3f 2024-11-24T03:47:52,000 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/fc1289dc3b9342999f3cf3e579751e3f, entries=6, sequenceid=65, filesize=11.2 K 2024-11-24T03:47:52,001 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for eb02e0befce36773ef5c04dd5b1e444f in 28ms, sequenceid=65, compaction requested=false 2024-11-24T03:47:52,001 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/deb09140e7534bdd98830e669a127fe4, 
hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1be3f47689ee4147b3b3465e3c866103, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/772a508ab55a4f65a1c7ae06173ffcac, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/c8550ecae806470fb53faf2b16aa27d9, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/163bc1802760495faa3b7ff2c9cbb342] to archive 2024-11-24T03:47:52,003 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T03:47:52,003 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/.tmp/info/d68170f57b2744599938359259fc55bf is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f./info:regioninfo/1732420034478/Put/seqid=0 2024-11-24T03:47:52,005 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/deb09140e7534bdd98830e669a127fe4 to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/deb09140e7534bdd98830e669a127fe4 2024-11-24T03:47:52,006 WARN [Thread-1048 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:47:52,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:52734 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6]'}, localName='127.0.0.1:35875', datanodeUuid='17aeb76d-2b9c-4be7-9637-d6f2a7efeb84', xmitsInProgress=0}:Exception transferring block BP-258016331-172.17.0.2-1732420030834:blk_1073741889_1075 to mirror 127.0.0.1:33983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:52,006 WARN [Thread-1048 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35875,DS-9317df1f-ddac-4a59-bb8a-e8baa9a3b80a,DISK], DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:52,006 WARN [Thread-1048 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741889_1075 2024-11-24T03:47:52,006 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:52734 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T03:47:52,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413175620_22 at /127.0.0.1:52734 [Receiving block BP-258016331-172.17.0.2-1732420030834:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:35875:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52734 dst: /127.0.0.1:35875 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:52,006 WARN [Thread-1048 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:52,007 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1be3f47689ee4147b3b3465e3c866103 to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/1be3f47689ee4147b3b3465e3c866103 2024-11-24T03:47:52,008 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/a220abdc0bc94950940c110ea60c352b 2024-11-24T03:47:52,009 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/772a508ab55a4f65a1c7ae06173ffcac to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/772a508ab55a4f65a1c7ae06173ffcac 2024-11-24T03:47:52,010 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/c8550ecae806470fb53faf2b16aa27d9 to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/c8550ecae806470fb53faf2b16aa27d9 2024-11-24T03:47:52,012 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/163bc1802760495faa3b7ff2c9cbb342 to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/info/163bc1802760495faa3b7ff2c9cbb342 2024-11-24T03:47:52,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741890_1076 (size=7089) 2024-11-24T03:47:52,013 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=71d8d2d6408d:35963 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T03:47:52,013 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [deb09140e7534bdd98830e669a127fe4=10347, 1be3f47689ee4147b3b3465e3c866103=12506, a220abdc0bc94950940c110ea60c352b=17994, 772a508ab55a4f65a1c7ae06173ffcac=6027, c8550ecae806470fb53faf2b16aa27d9=6027, 163bc1802760495faa3b7ff2c9cbb342=11421] 2024-11-24T03:47:52,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741890_1076 (size=7089) 2024-11-24T03:47:52,014 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/.tmp/info/d68170f57b2744599938359259fc55bf 2024-11-24T03:47:52,017 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/eb02e0befce36773ef5c04dd5b1e444f/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-11-24T03:47:52,017 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. 
2024-11-24T03:47:52,017 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for eb02e0befce36773ef5c04dd5b1e444f:
Waiting for close lock at 1732420071972
Running coprocessor pre-close hooks at 1732420071972
Disabling compacts and flushes for region at 1732420071972
Disabling writes for close at 1732420071973 (+1 ms)
Obtaining lock to block concurrent updates at 1732420071973
Preparing flush snapshotting stores in eb02e0befce36773ef5c04dd5b1e444f at 1732420071973
Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1732420071973
Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f. at 1732420071975 (+2 ms)
Flushing eb02e0befce36773ef5c04dd5b1e444f/info: creating writer at 1732420071975
Flushing eb02e0befce36773ef5c04dd5b1e444f/info: appending metadata at 1732420071978 (+3 ms)
Flushing eb02e0befce36773ef5c04dd5b1e444f/info: closing flushed file at 1732420071978
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1516b0ce: reopening flushed file at 1732420071993 (+15 ms)
Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for eb02e0befce36773ef5c04dd5b1e444f in 28ms, sequenceid=65, compaction requested=false at 1732420072001 (+8 ms)
Writing region close event to WAL at 1732420072014 (+13 ms)
Running coprocessor post-close hooks at 1732420072017 (+3 ms)
Closed at 1732420072017
2024-11-24T03:47:52,018 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732420034104.eb02e0befce36773ef5c04dd5b1e444f.
2024-11-24T03:47:52,033 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/.tmp/ns/0b6f93d10e3145ae858792582eee1703 is 43, key is default/ns:d/1732420033886/Put/seqid=0
2024-11-24T03:47:52,034 WARN [Thread-1056 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T03:47:52,035 WARN [Thread-1056 {}] hdfs.DataStreamer(1731): Error Recovery for BP-258016331-172.17.0.2-1732420030834:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK], DatanodeInfoWithStorage[127.0.0.1:46357,DS-49d5c796-666a-4b4b-842e-12c9638ede79,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK]) is bad. 2024-11-24T03:47:52,035 WARN [Thread-1056 {}] hdfs.DataStreamer(1850): Abandoning BP-258016331-172.17.0.2-1732420030834:blk_1073741891_1077 2024-11-24T03:47:52,035 WARN [Thread-1056 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33983,DS-bf5923c8-b3dd-480e-91f0-7938e60612c9,DISK] 2024-11-24T03:47:52,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741892_1078 (size=5153) 2024-11-24T03:47:52,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741892_1078 (size=5153) 2024-11-24T03:47:52,040 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/.tmp/ns/0b6f93d10e3145ae858792582eee1703 2024-11-24T03:47:52,059 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/.tmp/table/f0aef67719314f84b36f1fe58b85b23e is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732420034490/Put/seqid=0 2024-11-24T03:47:52,062 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T03:47:52,062 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T03:47:52,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741893_1079 (size=5424) 2024-11-24T03:47:52,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741893_1079 (size=5424) 2024-11-24T03:47:52,064 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/.tmp/table/f0aef67719314f84b36f1fe58b85b23e 2024-11-24T03:47:52,068 INFO [regionserver/71d8d2d6408d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:47:52,071 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/.tmp/info/d68170f57b2744599938359259fc55bf as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/info/d68170f57b2744599938359259fc55bf 2024-11-24T03:47:52,074 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.1732420053758 to hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs/71d8d2d6408d%2C43259%2C1732420032702.1732420053758 2024-11-24T03:47:52,076 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/info/d68170f57b2744599938359259fc55bf, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T03:47:52,077 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/.tmp/ns/0b6f93d10e3145ae858792582eee1703 as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/ns/0b6f93d10e3145ae858792582eee1703 2024-11-24T03:47:52,082 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/ns/0b6f93d10e3145ae858792582eee1703, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T03:47:52,084 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/.tmp/table/f0aef67719314f84b36f1fe58b85b23e as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/table/f0aef67719314f84b36f1fe58b85b23e 2024-11-24T03:47:52,089 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/table/f0aef67719314f84b36f1fe58b85b23e, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T03:47:52,091 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 117ms, sequenceid=11, compaction requested=false 2024-11-24T03:47:52,096 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T03:47:52,096 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:47:52,096 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:47:52,096 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420071974Running coprocessor pre-close hooks at 1732420071974Disabling compacts and flushes for region at 
1732420071974Disabling writes for close at 1732420071974Obtaining lock to block concurrent updates at 1732420071975 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732420071975Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732420071975Flushing stores of hbase:meta,,1.1588230740 at 1732420071986 (+11 ms)Flushing 1588230740/info: creating writer at 1732420071986Flushing 1588230740/info: appending metadata at 1732420072003 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732420072003Flushing 1588230740/ns: creating writer at 1732420072019 (+16 ms)Flushing 1588230740/ns: appending metadata at 1732420072032 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1732420072032Flushing 1588230740/table: creating writer at 1732420072046 (+14 ms)Flushing 1588230740/table: appending metadata at 1732420072059 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732420072059Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3719247f: reopening flushed file at 1732420072070 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@263b320d: reopening flushed file at 1732420072077 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1216be8d: reopening flushed file at 1732420072083 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 117ms, sequenceid=11, compaction requested=false at 1732420072091 (+8 ms)Writing region close event to WAL at 1732420072092 (+1 ms)Running coprocessor post-close hooks at 1732420072096 (+4 ms)Closed at 1732420072096 2024-11-24T03:47:52,097 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T03:47:52,174 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(976): stopping server 71d8d2d6408d,43259,1732420032702; all regions closed. 
2024-11-24T03:47:52,174 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:52,175 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:52,175 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:52,175 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:52,175 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:52,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741886_1071 (size=825) 2024-11-24T03:47:52,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741886_1071 (size=825) 2024-11-24T03:47:52,203 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T03:47:52,203 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T03:47:53,208 INFO [regionserver/71d8d2d6408d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:47:54,315 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2823801b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46357, datanodeUuid=df14d960-b4ba-43c0-a068-d20eb718348c, infoPort=42601, infoSecurePort=0, ipcPort=33491, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834):Failed to transfer BP-258016331-172.17.0.2-1732420030834:blk_1073741832_1008 to 127.0.0.1:33983 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:54,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741836_1012 (size=76) 2024-11-24T03:47:54,734 INFO [master/71d8d2d6408d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T03:47:54,734 INFO [master/71d8d2d6408d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T03:47:55,309 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2823801b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46357, datanodeUuid=df14d960-b4ba-43c0-a068-d20eb718348c, infoPort=42601, infoSecurePort=0, ipcPort=33491, storageInfo=lv=-57;cid=testClusterID;nsid=448418042;c=1732420030834):Failed to transfer BP-258016331-172.17.0.2-1732420030834:blk_1073741828_1004 to 127.0.0.1:33983 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:55,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:47:55,977 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 after 4002ms 2024-11-24T03:47:55,986 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta after 4002ms 2024-11-24T03:47:56,339 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7b16cc27 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-258016331-172.17.0.2-1732420030834:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:43649,null,null]) java.net.ConnectException: Call From 71d8d2d6408d/172.17.0.2 to localhost:34843 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T03:47:56,975 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T03:47:56,978 DEBUG [RS:1;71d8d2d6408d:32933 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs 2024-11-24T03:47:56,978 INFO [RS:1;71d8d2d6408d:32933 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C32933%2C1732420033969:(num 1732420034206) 2024-11-24T03:47:56,978 DEBUG [RS:1;71d8d2d6408d:32933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:56,979 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:47:56,979 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:47:56,979 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.ChoreService(370): Chore service for: regionserver/71d8d2d6408d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T03:47:56,980 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T03:47:56,980 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T03:47:56,980 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-24T03:47:56,980 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T03:47:56,980 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:47:56,980 INFO [RS:1;71d8d2d6408d:32933 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32933 2024-11-24T03:47:56,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:47:57,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:47:57,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/71d8d2d6408d,32933,1732420033969 2024-11-24T03:47:57,028 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:47:57,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,036 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [71d8d2d6408d,32933,1732420033969] 2024-11-24T03:47:57,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,044 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/71d8d2d6408d,32933,1732420033969 already deleted, retry=false 2024-11-24T03:47:57,045 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 71d8d2d6408d,32933,1732420033969 expired; onlineServers=1 2024-11-24T03:47:57,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:57,137 INFO [RS:1;71d8d2d6408d:32933 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:47:57,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32933-0x1016c3d6adf0002, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:57,137 INFO [RS:1;71d8d2d6408d:32933 {}] regionserver.HRegionServer(1031): Exiting; stopping=71d8d2d6408d,32933,1732420033969; zookeeper connection closed. 2024-11-24T03:47:57,138 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e3d5ba7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e3d5ba7 2024-11-24T03:47:57,175 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T03:47:57,185 DEBUG [RS:0;71d8d2d6408d:43259 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs 2024-11-24T03:47:57,185 INFO [RS:0;71d8d2d6408d:43259 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C43259%2C1732420032702.meta:.meta(num 1732420071976) 2024-11-24T03:47:57,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741882_1066 (size=15140) 2024-11-24T03:47:57,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741882_1066 (size=15140) 2024-11-24T03:47:57,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741863_1046 
(size=13591) 2024-11-24T03:47:57,191 DEBUG [RS:0;71d8d2d6408d:43259 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/oldWALs 2024-11-24T03:47:57,191 INFO [RS:0;71d8d2d6408d:43259 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C43259%2C1732420032702:(num 1732420071660) 2024-11-24T03:47:57,191 DEBUG [RS:0;71d8d2d6408d:43259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:47:57,191 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:47:57,191 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:47:57,191 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.ChoreService(370): Chore service for: regionserver/71d8d2d6408d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T03:47:57,191 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:47:57,191 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T03:47:57,192 INFO [RS:0;71d8d2d6408d:43259 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43259 2024-11-24T03:47:57,215 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/71d8d2d6408d,43259,1732420032702 2024-11-24T03:47:57,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:47:57,215 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:47:57,224 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [71d8d2d6408d,43259,1732420032702] 2024-11-24T03:47:57,232 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/71d8d2d6408d,43259,1732420032702 already deleted, retry=false 2024-11-24T03:47:57,232 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 71d8d2d6408d,43259,1732420032702 expired; onlineServers=0 2024-11-24T03:47:57,232 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '71d8d2d6408d,35963,1732420032556' ***** 2024-11-24T03:47:57,232 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T03:47:57,232 INFO [M:0;71d8d2d6408d:35963 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:47:57,232 INFO [M:0;71d8d2d6408d:35963 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:47:57,232 DEBUG [M:0;71d8d2d6408d:35963 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T03:47:57,232 DEBUG [M:0;71d8d2d6408d:35963 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T03:47:57,232 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T03:47:57,232 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420033073 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420033073,5,FailOnTimeoutGroup] 2024-11-24T03:47:57,232 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420033073 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420033073,5,FailOnTimeoutGroup] 2024-11-24T03:47:57,233 INFO [M:0;71d8d2d6408d:35963 {}] hbase.ChoreService(370): Chore service for: master/71d8d2d6408d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T03:47:57,233 INFO [M:0;71d8d2d6408d:35963 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:47:57,233 DEBUG [M:0;71d8d2d6408d:35963 {}] master.HMaster(1795): Stopping service threads 2024-11-24T03:47:57,233 INFO [M:0;71d8d2d6408d:35963 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T03:47:57,233 INFO [M:0;71d8d2d6408d:35963 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:47:57,233 INFO [M:0;71d8d2d6408d:35963 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T03:47:57,233 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T03:47:57,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T03:47:57,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:57,240 DEBUG [M:0;71d8d2d6408d:35963 {}] zookeeper.ZKUtil(347): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T03:47:57,240 WARN [M:0;71d8d2d6408d:35963 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T03:47:57,241 INFO [M:0;71d8d2d6408d:35963 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/.lastflushedseqids 2024-11-24T03:47:57,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741894_1080 (size=130) 2024-11-24T03:47:57,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741894_1080 (size=130) 2024-11-24T03:47:57,247 INFO [M:0;71d8d2d6408d:35963 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T03:47:57,247 INFO [M:0;71d8d2d6408d:35963 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T03:47:57,248 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:47:57,248 INFO [M:0;71d8d2d6408d:35963 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:57,248 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:57,248 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:47:57,248 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:57,248 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-24T03:47:57,262 DEBUG [M:0;71d8d2d6408d:35963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fdbbad43ec9142eb96d3d66b37f3c50d is 82, key is hbase:meta,,1/info:regioninfo/1732420033777/Put/seqid=0 2024-11-24T03:47:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741895_1081 (size=5672) 2024-11-24T03:47:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741895_1081 (size=5672) 2024-11-24T03:47:57,268 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fdbbad43ec9142eb96d3d66b37f3c50d 2024-11-24T03:47:57,287 DEBUG [M:0;71d8d2d6408d:35963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/90abb6b223a840edbde1e3026bbd5ce8 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732420034497/Put/seqid=0 2024-11-24T03:47:57,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741896_1082 (size=6255) 2024-11-24T03:47:57,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741896_1082 (size=6255) 2024-11-24T03:47:57,293 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/90abb6b223a840edbde1e3026bbd5ce8 2024-11-24T03:47:57,298 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 90abb6b223a840edbde1e3026bbd5ce8 2024-11-24T03:47:57,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:47:57,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:47:57,313 DEBUG [M:0;71d8d2d6408d:35963 
{}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/089bedf302d34bb988700b715009e8cb is 69, key is 71d8d2d6408d,32933,1732420033969/rs:state/1732420034045/Put/seqid=0 2024-11-24T03:47:57,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741897_1083 (size=5224) 2024-11-24T03:47:57,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741897_1083 (size=5224) 2024-11-24T03:47:57,318 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/089bedf302d34bb988700b715009e8cb 2024-11-24T03:47:57,324 INFO [RS:0;71d8d2d6408d:43259 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:47:57,324 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:57,324 INFO [RS:0;71d8d2d6408d:43259 {}] regionserver.HRegionServer(1031): Exiting; stopping=71d8d2d6408d,43259,1732420032702; zookeeper connection closed. 2024-11-24T03:47:57,324 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43259-0x1016c3d6adf0001, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:57,324 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5bea83da {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5bea83da 2024-11-24T03:47:57,324 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-24T03:47:57,341 DEBUG [M:0;71d8d2d6408d:35963 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a5828a47a944157a7810f7172d1310f is 52, key is load_balancer_on/state:d/1732420033949/Put/seqid=0 2024-11-24T03:47:57,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741898_1084 (size=5056) 2024-11-24T03:47:57,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741898_1084 (size=5056) 2024-11-24T03:47:57,347 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a5828a47a944157a7810f7172d1310f 2024-11-24T03:47:57,353 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fdbbad43ec9142eb96d3d66b37f3c50d as 
hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fdbbad43ec9142eb96d3d66b37f3c50d 2024-11-24T03:47:57,359 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fdbbad43ec9142eb96d3d66b37f3c50d, entries=8, sequenceid=60, filesize=5.5 K 2024-11-24T03:47:57,360 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/90abb6b223a840edbde1e3026bbd5ce8 as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/90abb6b223a840edbde1e3026bbd5ce8 2024-11-24T03:47:57,364 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 90abb6b223a840edbde1e3026bbd5ce8 2024-11-24T03:47:57,365 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/90abb6b223a840edbde1e3026bbd5ce8, entries=6, sequenceid=60, filesize=6.1 K 2024-11-24T03:47:57,366 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/089bedf302d34bb988700b715009e8cb as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/089bedf302d34bb988700b715009e8cb 2024-11-24T03:47:57,370 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/089bedf302d34bb988700b715009e8cb, entries=2, sequenceid=60, filesize=5.1 K 2024-11-24T03:47:57,371 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a5828a47a944157a7810f7172d1310f as hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a5828a47a944157a7810f7172d1310f 2024-11-24T03:47:57,376 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a5828a47a944157a7810f7172d1310f, entries=1, sequenceid=60, filesize=4.9 K 2024-11-24T03:47:57,377 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false 2024-11-24T03:47:57,379 INFO [M:0;71d8d2d6408d:35963 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T03:47:57,379 DEBUG [M:0;71d8d2d6408d:35963 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420077247Disabling compacts and flushes for region at 1732420077247Disabling writes for close at 1732420077248 (+1 ms)Obtaining lock to block concurrent updates at 1732420077248Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732420077248Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732420077248Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732420077249 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732420077249Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732420077262 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732420077262Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732420077273 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732420077286 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732420077286Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732420077298 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732420077312 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732420077312Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732420077323 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732420077341 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732420077341Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@137b9c33: reopening flushed file at 1732420077352 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18979c2: reopening flushed file at 1732420077359 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@174b9ae5: reopening flushed file at 1732420077365 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1127a0db: reopening flushed file at 1732420077370 (+5 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false at 1732420077378 (+8 ms)Writing region close event to WAL at 1732420077379 (+1 ms)Closed at 1732420077379 2024-11-24T03:47:57,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,380 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,380 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,380 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:47:57,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741880_1063 (size=1045) 2024-11-24T03:47:57,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741880_1063 (size=1045) 2024-11-24T03:47:57,383 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T03:47:57,383 INFO [M:0;71d8d2d6408d:35963 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T03:47:57,383 INFO [M:0;71d8d2d6408d:35963 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35963 2024-11-24T03:47:57,383 INFO [M:0;71d8d2d6408d:35963 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:47:57,511 INFO [M:0;71d8d2d6408d:35963 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:47:57,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:57,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35963-0x1016c3d6adf0000, quorum=127.0.0.1:51797, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:47:57,548 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@239bd5a6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:57,549 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15efdeac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:57,549 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:57,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54643be5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:57,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b1cb5ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:57,552 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:47:57,552 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T03:47:57,552 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-258016331-172.17.0.2-1732420030834 (Datanode Uuid df14d960-b4ba-43c0-a068-d20eb718348c) service to localhost/127.0.0.1:44445 2024-11-24T03:47:57,552 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:47:57,552 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3feb9ef {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:43649,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:34843 , LocalHost:localPort 71d8d2d6408d/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T03:47:57,553 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3feb9ef {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:46357,null,null]) java.io.IOException: No block pool offer service for bpid=BP-258016331-172.17.0.2-1732420030834 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:57,553 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data3/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:57,553 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3feb9ef {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43649,null,null], DatanodeInfoWithStorage[127.0.0.1:46357,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-258016331-172.17.0.2-1732420030834:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:43649,null,null], DatanodeInfoWithStorage[127.0.0.1:46357,null,null]] 2024-11-24T03:47:57,553 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3feb9ef {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:43649,null,null]) java.io.IOException: No block pool offer service for bpid=BP-258016331-172.17.0.2-1732420030834 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:57,554 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data4/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:57,554 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3feb9ef {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46357,null,null]) java.io.IOException: No block pool offer service for bpid=BP-258016331-172.17.0.2-1732420030834 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:47:57,554 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3feb9ef {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43649,null,null], DatanodeInfoWithStorage[127.0.0.1:46357,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-258016331-172.17.0.2-1732420030834:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:43649,null,null], DatanodeInfoWithStorage[127.0.0.1:46357,null,null]] 2024-11-24T03:47:57,554 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:57,555 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T03:47:57,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ea87f36{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:57,558 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45b83ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:57,558 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:57,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e0d6f2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:57,558 INFO 
[Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56161739{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:57,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:47:57,581 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T03:47:57,581 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:47:57,581 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:47:57,581 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:47:57,581 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:47:57,581 WARN [BP-258016331-172.17.0.2-1732420030834 heartbeating to localhost/127.0.0.1:44445 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-258016331-172.17.0.2-1732420030834 (Datanode Uuid 17aeb76d-2b9c-4be7-9637-d6f2a7efeb84) service to localhost/127.0.0.1:44445 2024-11-24T03:47:57,581 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T03:47:57,581 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T03:47:57,582 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data5/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:57,582 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/cluster_d757aee2-fdae-f978-c06f-f9ec25e0f6fe/data/data6/current/BP-258016331-172.17.0.2-1732420030834 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:47:57,582 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:47:57,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2cd29190{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:47:57,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3cc2b5d2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:47:57,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:47:57,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48202e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:47:57,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57980111{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir/,STOPPED} 2024-11-24T03:47:57,595 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T03:47:57,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T03:47:57,632 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:38553 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44445 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:44445 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44445 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44445 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44445 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44445 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44445 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f6f70bef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44445 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f6f70bef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:44445 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44445 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38553 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44445 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=454 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=193 (was 254), ProcessCount=11 (was 12), AvailableMemoryMB=7467 (was 8312) 2024-11-24T03:47:57,638 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=193, ProcessCount=11, AvailableMemoryMB=7466 2024-11-24T03:47:57,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.log.dir so I do NOT create it in target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95b40cbd-5cc7-bcc6-1a77-e8e2960294fc/hadoop.tmp.dir so I do NOT create it in target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1, deleteOnExit=true 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/test.cache.data in system properties and HBase conf 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir in system properties and HBase conf 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T03:47:57,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T03:47:57,639 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/nfs.dump.dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/java.io.tmpdir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T03:47:57,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T03:47:57,653 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:47:57,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T03:47:57,945 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T03:47:57,946 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T03:47:57,946 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T03:47:57,946 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-24T03:47:57,947 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T03:47:57,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@426efeef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,AVAILABLE}
2024-11-24T03:47:57,948 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e4608e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T03:47:57,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T03:47:57,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-24T03:47:58,040 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b9a91a6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/java.io.tmpdir/jetty-localhost-34473-hadoop-hdfs-3_4_1-tests_jar-_-any-8065772129419595398/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:47:58,041 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a096cfb{HTTP/1.1, (http/1.1)}{localhost:34473} 2024-11-24T03:47:58,041 INFO [Time-limited test {}] server.Server(415): Started @157776ms 2024-11-24T03:47:58,051 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:47:58,254 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:47:58,258 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:47:58,259 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:47:58,259 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:47:58,259 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:47:58,260 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30acbc29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:47:58,260 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a9e9581{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:47:58,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14694414{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/java.io.tmpdir/jetty-localhost-41481-hadoop-hdfs-3_4_1-tests_jar-_-any-5680163791656455990/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:47:58,352 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@567663d4{HTTP/1.1, (http/1.1)}{localhost:41481} 2024-11-24T03:47:58,352 INFO [Time-limited test {}] server.Server(415): Started @158088ms 2024-11-24T03:47:58,353 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T03:47:58,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T03:47:58,393 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T03:47:58,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T03:47:58,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T03:47:58,394 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-24T03:47:58,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14c3623d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,AVAILABLE}
2024-11-24T03:47:58,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6fc46d5b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T03:47:58,489 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32ad5d3f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/java.io.tmpdir/jetty-localhost-33741-hadoop-hdfs-3_4_1-tests_jar-_-any-4253156449794191670/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T03:47:58,489 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3edd0360{HTTP/1.1, (http/1.1)}{localhost:33741}
2024-11-24T03:47:58,489 INFO [Time-limited test {}] server.Server(415): Started @158225ms
2024-11-24T03:47:58,491 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-24T03:47:58,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T03:47:58,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T03:47:59,337 WARN [Thread-1199 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data1/current/BP-711475820-172.17.0.2-1732420077663/current, will proceed with Du for space computation calculation,
2024-11-24T03:47:59,337 WARN [Thread-1200 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data2/current/BP-711475820-172.17.0.2-1732420077663/current, will proceed with Du for space computation calculation,
2024-11-24T03:47:59,356 WARN [Thread-1163 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-24T03:47:59,359 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f381dd07fd809e7 with lease ID 0xae5b0f24c83c1294: Processing first storage report for DS-7c23efdc-9dc5-488f-8d77-731691371aa6 from datanode DatanodeRegistration(127.0.0.1:34135, datanodeUuid=5629a8ec-134d-4be7-b6e9-1ac3e524de47, infoPort=42061, infoSecurePort=0, ipcPort=35881, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663)
2024-11-24T03:47:59,359 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f381dd07fd809e7 with lease ID 0xae5b0f24c83c1294: from storage DS-7c23efdc-9dc5-488f-8d77-731691371aa6 node DatanodeRegistration(127.0.0.1:34135, datanodeUuid=5629a8ec-134d-4be7-b6e9-1ac3e524de47, infoPort=42061, infoSecurePort=0, ipcPort=35881, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T03:47:59,359 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f381dd07fd809e7 with lease ID 0xae5b0f24c83c1294: Processing first storage report for DS-adf569e4-4af8-477f-8b82-607f950c86d0 from datanode DatanodeRegistration(127.0.0.1:34135, datanodeUuid=5629a8ec-134d-4be7-b6e9-1ac3e524de47, infoPort=42061, infoSecurePort=0, ipcPort=35881, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663)
2024-11-24T03:47:59,359 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f381dd07fd809e7 with lease ID 0xae5b0f24c83c1294: from storage DS-adf569e4-4af8-477f-8b82-607f950c86d0 node DatanodeRegistration(127.0.0.1:34135, datanodeUuid=5629a8ec-134d-4be7-b6e9-1ac3e524de47, infoPort=42061, infoSecurePort=0, ipcPort=35881, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T03:47:59,507 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data3/current/BP-711475820-172.17.0.2-1732420077663/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:59,507 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data4/current/BP-711475820-172.17.0.2-1732420077663/current, will proceed with Du for space computation calculation, 2024-11-24T03:47:59,528 WARN [Thread-1186 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T03:47:59,530 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77a87707d2f75bd1 with lease ID 0xae5b0f24c83c1295: Processing first storage report for DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b from datanode DatanodeRegistration(127.0.0.1:40049, datanodeUuid=0ccea49f-235e-49bb-a172-1d425c73d758, infoPort=41565, infoSecurePort=0, ipcPort=45303, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663) 2024-11-24T03:47:59,530 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77a87707d2f75bd1 with lease ID 0xae5b0f24c83c1295: from storage DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b node DatanodeRegistration(127.0.0.1:40049, datanodeUuid=0ccea49f-235e-49bb-a172-1d425c73d758, infoPort=41565, infoSecurePort=0, ipcPort=45303, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:59,531 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77a87707d2f75bd1 with lease ID 0xae5b0f24c83c1295: Processing first storage report for DS-751db3f8-b324-4f00-8918-2615bc885718 from datanode DatanodeRegistration(127.0.0.1:40049, datanodeUuid=0ccea49f-235e-49bb-a172-1d425c73d758, infoPort=41565, infoSecurePort=0, ipcPort=45303, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663) 2024-11-24T03:47:59,531 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77a87707d2f75bd1 with lease ID 0xae5b0f24c83c1295: from storage DS-751db3f8-b324-4f00-8918-2615bc885718 node DatanodeRegistration(127.0.0.1:40049, datanodeUuid=0ccea49f-235e-49bb-a172-1d425c73d758, infoPort=41565, infoSecurePort=0, ipcPort=45303, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:47:59,626 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1 2024-11-24T03:47:59,630 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/zookeeper_0, clientPort=60504, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T03:47:59,630 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60504 2024-11-24T03:47:59,631 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:59,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:59,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34135 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:47:59,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40049 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:47:59,644 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef with version=8 2024-11-24T03:47:59,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/hbase-staging 2024-11-24T03:47:59,647 INFO [Time-limited test {}] client.ConnectionUtils(128): master/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:47:59,647 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:59,647 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:59,647 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:47:59,647 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:59,647 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:47:59,647 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T03:47:59,647 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:47:59,648 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41523 2024-11-24T03:47:59,650 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41523 connecting to ZooKeeper ensemble=127.0.0.1:60504 2024-11-24T03:47:59,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415230x0, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:47:59,695 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41523-0x1016c3e22d20000 connected 2024-11-24T03:47:59,779 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:59,783 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:59,788 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:59,788 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef, hbase.cluster.distributed=false 2024-11-24T03:47:59,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:47:59,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41523 2024-11-24T03:47:59,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41523 2024-11-24T03:47:59,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41523 2024-11-24T03:47:59,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41523 2024-11-24T03:47:59,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41523 2024-11-24T03:47:59,807 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:47:59,807 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:59,807 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:59,807 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:47:59,807 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:47:59,807 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:47:59,807 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T03:47:59,807 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:47:59,808 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42303 2024-11-24T03:47:59,809 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42303 connecting to ZooKeeper ensemble=127.0.0.1:60504 2024-11-24T03:47:59,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:59,811 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:59,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423030x0, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:47:59,823 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:423030x0, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:47:59,823 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42303-0x1016c3e22d20001 connected 2024-11-24T03:47:59,824 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T03:47:59,824 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T03:47:59,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T03:47:59,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:47:59,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42303 2024-11-24T03:47:59,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42303 2024-11-24T03:47:59,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42303 2024-11-24T03:47:59,833 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42303 2024-11-24T03:47:59,834 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42303 2024-11-24T03:47:59,848 
DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;71d8d2d6408d:41523 2024-11-24T03:47:59,848 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/71d8d2d6408d,41523,1732420079646 2024-11-24T03:47:59,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:59,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:59,857 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/71d8d2d6408d,41523,1732420079646 2024-11-24T03:47:59,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T03:47:59,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:59,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:59,876 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T03:47:59,877 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/71d8d2d6408d,41523,1732420079646 from backup master directory 2024-11-24T03:47:59,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/71d8d2d6408d,41523,1732420079646 2024-11-24T03:47:59,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:59,886 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-24T03:47:59,886 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=71d8d2d6408d,41523,1732420079646 2024-11-24T03:47:59,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:47:59,892 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/hbase.id] with ID: 591ec7f0-dc4f-4481-8ba1-f83554158ade 2024-11-24T03:47:59,892 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/.tmp/hbase.id 2024-11-24T03:47:59,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40049 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:47:59,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34135 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:47:59,906 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/.tmp/hbase.id]:[hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/hbase.id] 2024-11-24T03:47:59,920 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:47:59,920 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T03:47:59,922 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-24T03:47:59,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:59,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:47:59,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34135 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:47:59,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40049 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:47:59,939 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:47:59,939 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T03:47:59,940 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:47:59,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40049 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:47:59,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34135 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:47:59,947 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store 2024-11-24T03:47:59,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34135 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:47:59,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40049 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:47:59,954 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:59,954 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:47:59,954 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:59,954 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:59,954 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:47:59,954 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:47:59,954 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T03:47:59,954 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420079954Disabling compacts and flushes for region at 1732420079954Disabling writes for close at 1732420079954Writing region close event to WAL at 1732420079954Closed at 1732420079954 2024-11-24T03:47:59,955 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/.initializing 2024-11-24T03:47:59,955 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646 2024-11-24T03:47:59,958 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C41523%2C1732420079646, suffix=, logDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646, archiveDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/oldWALs, maxLogs=10 2024-11-24T03:47:59,958 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C41523%2C1732420079646.1732420079958 2024-11-24T03:47:59,963 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 2024-11-24T03:47:59,963 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41565:41565),(127.0.0.1/127.0.0.1:42061:42061)] 2024-11-24T03:47:59,964 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:47:59,964 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:47:59,964 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,964 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T03:47:59,967 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:59,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:47:59,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T03:47:59,968 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:59,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:59,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T03:47:59,970 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:59,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:59,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T03:47:59,971 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:47:59,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:47:59,972 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,972 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,972 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,974 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,974 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,974 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T03:47:59,975 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:47:59,977 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:47:59,978 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=790324, jitterRate=0.004949823021888733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T03:47:59,978 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732420079964Initializing all the Stores at 1732420079965 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420079965Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420079965Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420079965Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420079965Cleaning up temporary data from old regions at 1732420079974 (+9 ms)Region opened successfully at 1732420079978 (+4 ms) 2024-11-24T03:47:59,979 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T03:47:59,982 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2422f0bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:47:59,983 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T03:47:59,983 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T03:47:59,983 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T03:47:59,983 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T03:47:59,984 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T03:47:59,984 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T03:47:59,984 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T03:47:59,986 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T03:47:59,987 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T03:47:59,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:47:59,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:48:00,015 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T03:48:00,015 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T03:48:00,016 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T03:48:00,023 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T03:48:00,023 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T03:48:00,024 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T03:48:00,036 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T03:48:00,037 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T03:48:00,048 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T03:48:00,050 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T03:48:00,061 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T03:48:00,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:48:00,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:48:00,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:00,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:00,070 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=71d8d2d6408d,41523,1732420079646, sessionid=0x1016c3e22d20000, setting cluster-up flag (Was=false) 2024-11-24T03:48:00,086 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:00,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:00,111 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T03:48:00,112 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,41523,1732420079646 2024-11-24T03:48:00,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:00,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:00,156 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T03:48:00,158 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,41523,1732420079646 2024-11-24T03:48:00,159 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T03:48:00,163 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T03:48:00,163 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T03:48:00,163 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-24T03:48:00,164 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 71d8d2d6408d,41523,1732420079646 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T03:48:00,165 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:48:00,165 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:48:00,165 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:48:00,165 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:48:00,165 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/71d8d2d6408d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T03:48:00,165 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,166 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:48:00,166 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,170 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:48:00,170 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T03:48:00,171 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,171 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T03:48:00,177 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732420110176 2024-11-24T03:48:00,177 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T03:48:00,177 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T03:48:00,177 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T03:48:00,177 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T03:48:00,177 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T03:48:00,177 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T03:48:00,177 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-24T03:48:00,179 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T03:48:00,179 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T03:48:00,179 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T03:48:00,182 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T03:48:00,182 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T03:48:00,183 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420080183,5,FailOnTimeoutGroup] 2024-11-24T03:48:00,183 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420080183,5,FailOnTimeoutGroup] 2024-11-24T03:48:00,183 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,183 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T03:48:00,183 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,183 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-24T03:48:00,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40049 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:48:00,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34135 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:48:00,186 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T03:48:00,186 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef 2024-11-24T03:48:00,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34135 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:48:00,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40049 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:48:00,196 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:00,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:48:00,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:48:00,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:00,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:48:00,201 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:48:00,201 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:00,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:48:00,203 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:48:00,203 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:00,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:48:00,205 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:48:00,205 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:00,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:48:00,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740 2024-11-24T03:48:00,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740 2024-11-24T03:48:00,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:48:00,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:48:00,209 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T03:48:00,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:48:00,212 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:48:00,212 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863389, jitterRate=0.09785620868206024}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:48:00,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732420080196Initializing all the Stores at 1732420080197 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420080197Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420080197Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420080197Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420080197Cleaning up temporary data from old regions at 1732420080208 (+11 ms)Region opened successfully at 1732420080212 (+4 ms) 2024-11-24T03:48:00,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:48:00,213 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:48:00,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:48:00,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:48:00,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:48:00,213 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:48:00,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420080213Disabling compacts and flushes for region at 1732420080213Disabling writes for close at 1732420080213Writing region close 
event to WAL at 1732420080213Closed at 1732420080213 2024-11-24T03:48:00,214 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:48:00,214 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T03:48:00,214 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T03:48:00,215 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:48:00,216 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T03:48:00,238 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(746): ClusterId : 591ec7f0-dc4f-4481-8ba1-f83554158ade 2024-11-24T03:48:00,238 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T03:48:00,255 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T03:48:00,255 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T03:48:00,267 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T03:48:00,267 DEBUG [RS:0;71d8d2d6408d:42303 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2078c698, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:48:00,282 DEBUG [RS:0;71d8d2d6408d:42303 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;71d8d2d6408d:42303 2024-11-24T03:48:00,282 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T03:48:00,282 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T03:48:00,282 DEBUG [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T03:48:00,283 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(2659): reportForDuty to master=71d8d2d6408d,41523,1732420079646 with port=42303, startcode=1732420079806 2024-11-24T03:48:00,283 DEBUG [RS:0;71d8d2d6408d:42303 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T03:48:00,285 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60595, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T03:48:00,285 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41523 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:00,285 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41523 {}] master.ServerManager(517): Registering regionserver=71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:00,287 DEBUG [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef 2024-11-24T03:48:00,287 DEBUG [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35611 2024-11-24T03:48:00,287 DEBUG [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T03:48:00,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:48:00,298 DEBUG [RS:0;71d8d2d6408d:42303 {}] zookeeper.ZKUtil(111): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:00,298 WARN [RS:0;71d8d2d6408d:42303 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T03:48:00,298 INFO [RS:0;71d8d2d6408d:42303 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:48:00,299 DEBUG [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:00,299 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [71d8d2d6408d,42303,1732420079806] 2024-11-24T03:48:00,303 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T03:48:00,305 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T03:48:00,306 INFO [RS:0;71d8d2d6408d:42303 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T03:48:00,306 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T03:48:00,306 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T03:48:00,308 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T03:48:00,308 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,309 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,309 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,309 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,309 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,309 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,309 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:48:00,309 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,309 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,310 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,310 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,310 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,310 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:00,310 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:48:00,310 DEBUG [RS:0;71d8d2d6408d:42303 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:48:00,311 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T03:48:00,311 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,311 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,311 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,311 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,311 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,42303,1732420079806-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:48:00,330 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T03:48:00,330 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,42303,1732420079806-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,331 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,331 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.Replication(171): 71d8d2d6408d,42303,1732420079806 started 2024-11-24T03:48:00,345 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,345 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(1482): Serving as 71d8d2d6408d,42303,1732420079806, RpcServer on 71d8d2d6408d/172.17.0.2:42303, sessionid=0x1016c3e22d20001 2024-11-24T03:48:00,345 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T03:48:00,345 DEBUG [RS:0;71d8d2d6408d:42303 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:00,345 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,42303,1732420079806' 2024-11-24T03:48:00,345 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T03:48:00,346 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T03:48:00,346 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T03:48:00,346 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T03:48:00,346 DEBUG [RS:0;71d8d2d6408d:42303 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:00,346 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,42303,1732420079806' 2024-11-24T03:48:00,346 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T03:48:00,346 DEBUG 
[RS:0;71d8d2d6408d:42303 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T03:48:00,347 DEBUG [RS:0;71d8d2d6408d:42303 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T03:48:00,347 INFO [RS:0;71d8d2d6408d:42303 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T03:48:00,347 INFO [RS:0;71d8d2d6408d:42303 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T03:48:00,366 WARN [71d8d2d6408d:41523 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T03:48:00,451 INFO [RS:0;71d8d2d6408d:42303 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C42303%2C1732420079806, suffix=, logDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806, archiveDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/oldWALs, maxLogs=32 2024-11-24T03:48:00,453 INFO [RS:0;71d8d2d6408d:42303 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C42303%2C1732420079806.1732420080452 2024-11-24T03:48:00,461 INFO [RS:0;71d8d2d6408d:42303 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 2024-11-24T03:48:00,463 DEBUG [RS:0;71d8d2d6408d:42303 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41565:41565),(127.0.0.1/127.0.0.1:42061:42061)] 2024-11-24T03:48:00,617 DEBUG [71d8d2d6408d:41523 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T03:48:00,618 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:00,621 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,42303,1732420079806, state=OPENING 2024-11-24T03:48:00,644 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T03:48:00,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:00,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:00,654 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:48:00,654 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:48:00,654 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:48:00,654 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,42303,1732420079806}] 2024-11-24T03:48:00,812 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T03:48:00,814 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42767, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T03:48:00,821 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T03:48:00,821 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:48:00,825 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C42303%2C1732420079806.meta, suffix=.meta, logDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806, archiveDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/oldWALs, maxLogs=32 2024-11-24T03:48:00,825 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta 2024-11-24T03:48:00,831 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta 2024-11-24T03:48:00,832 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41565:41565),(127.0.0.1/127.0.0.1:42061:42061)] 2024-11-24T03:48:00,832 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:48:00,833 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T03:48:00,833 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T03:48:00,833 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T03:48:00,833 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T03:48:00,833 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:00,833 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T03:48:00,833 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T03:48:00,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:48:00,835 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:48:00,835 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:00,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:48:00,837 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:48:00,837 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:00,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:48:00,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:48:00,838 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:00,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:48:00,839 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:48:00,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
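The CompactionConfiguration(183) lines above print the effective compaction settings for each column family of 1588230740 (files [3, 10), ratio 1.200000, off-peak ratio 5.000000, minCompactSize 128 MB). Assuming the standard hbase-site.xml keys back those values, a hedged sketch of setting them programmatically (the helper name is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      // Hedged sketch: keys believed to correspond to the CompactionConfiguration values above.
      static Configuration compactionDefaults() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);           // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);          // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);    // compaction ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        return conf;
      }
    }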
2024-11-24T03:48:00,840 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:48:00,841 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740 2024-11-24T03:48:00,842 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740 2024-11-24T03:48:00,843 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:48:00,844 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:48:00,844 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T03:48:00,846 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:48:00,846 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766542, jitterRate=-0.025292307138442993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:48:00,846 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T03:48:00,847 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732420080833Writing region info on filesystem at 1732420080833Initializing all the Stores at 1732420080834 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420080834Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420080834Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420080834Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420080834Cleaning up temporary data from old regions at 1732420080844 (+10 ms)Running coprocessor post-open hooks at 1732420080846 (+2 ms)Region opened successfully at 1732420080847 (+1 ms) 2024-11-24T03:48:00,848 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732420080811 2024-11-24T03:48:00,850 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T03:48:00,850 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T03:48:00,851 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:00,852 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,42303,1732420079806, state=OPEN 2024-11-24T03:48:00,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:48:00,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:48:00,895 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:00,896 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:48:00,896 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:48:00,902 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T03:48:00,902 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,42303,1732420079806 in 242 msec 2024-11-24T03:48:00,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T03:48:00,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 688 msec 2024-11-24T03:48:00,908 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:48:00,908 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T03:48:00,910 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:48:00,910 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,42303,1732420079806, seqNum=-1] 2024-11-24T03:48:00,911 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:48:00,912 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49935, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:48:00,922 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 758 msec 2024-11-24T03:48:00,922 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732420080922, completionTime=-1 2024-11-24T03:48:00,922 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T03:48:00,922 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T03:48:00,924 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T03:48:00,924 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732420140924 2024-11-24T03:48:00,924 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732420200924 2024-11-24T03:48:00,924 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T03:48:00,924 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41523,1732420079646-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,924 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41523,1732420079646-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,925 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41523,1732420079646-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,925 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-71d8d2d6408d:41523, period=300000, unit=MILLISECONDS is enabled. 
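InitMetaProcedure creates the 'default' and 'hbase' namespaces itself as part of pid=1 above. For a user namespace the equivalent call goes through the Admin client; a minimal sketch (class and method names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;

    public class NamespaceSketch {
      // Illustrative only: the system namespaces above are created internally by the master.
      static void createUserNamespace(Admin admin, String name) throws IOException {
        admin.createNamespace(NamespaceDescriptor.create(name).build());
      }
    }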
2024-11-24T03:48:00,925 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,925 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:00,927 DEBUG [master/71d8d2d6408d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T03:48:00,928 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.042sec 2024-11-24T03:48:00,928 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T03:48:00,928 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T03:48:00,928 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T03:48:00,928 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T03:48:00,928 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T03:48:00,928 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41523,1732420079646-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:48:00,928 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41523,1732420079646-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T03:48:00,931 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T03:48:00,931 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T03:48:00,931 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41523,1732420079646-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
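Each ChoreService(168) entry above registers a ScheduledChore (BalancerChore, CatalogJanitor, HbckChore, and so on) with the master's chore service. A minimal sketch of that pattern, assuming the ScheduledChore constructor that takes a name, a Stoppable and a period in milliseconds; the chore name and the 60 000 ms period are made up:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      // Illustrative sketch of the ScheduledChore pattern behind the entries above.
      static void scheduleExampleChore(ChoreService service, Stoppable stopper) {
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
          @Override
          protected void chore() {
            // periodic work would go here
          }
        };
        service.scheduleChore(chore);
      }
    }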
2024-11-24T03:48:00,938 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48cd4967, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:48:00,938 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 71d8d2d6408d,41523,-1 for getting cluster id 2024-11-24T03:48:00,938 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T03:48:00,940 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '591ec7f0-dc4f-4481-8ba1-f83554158ade' 2024-11-24T03:48:00,940 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T03:48:00,940 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "591ec7f0-dc4f-4481-8ba1-f83554158ade" 2024-11-24T03:48:00,940 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d2d3ab8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:48:00,940 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [71d8d2d6408d,41523,-1] 2024-11-24T03:48:00,941 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T03:48:00,941 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:48:00,943 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T03:48:00,944 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29189d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:48:00,944 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:48:00,945 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,42303,1732420079806, seqNum=-1] 2024-11-24T03:48:00,945 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:48:00,947 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38702, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:48:00,949 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=71d8d2d6408d,41523,1732420079646 2024-11-24T03:48:00,950 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:48:00,953 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T03:48:00,953 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-24T03:48:00,953 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-24T03:48:00,954 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T03:48:00,955 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 71d8d2d6408d,41523,1732420079646 2024-11-24T03:48:00,955 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@612cb3fe 2024-11-24T03:48:00,955 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T03:48:00,958 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T03:48:00,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41523 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T03:48:00,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41523 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
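The two TableDescriptorChecker warnings above fire because testLogRollOnPipelineRestart creates its table with deliberately tiny limits (MAX_FILESIZE 786432, MEMSTORE_FLUSHSIZE 8192) so that rolling and flushing happen quickly; the create request itself is logged next. A hedged sketch of one way to carry those values on a descriptor (the test may instead set the equivalent configuration keys; class and method names are illustrative, values are taken from the log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallLimitsTableSketch {
      // Hedged sketch: tiny limits that trigger the two warnings above.
      static void createTestTable(Admin admin) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(786432L)        // ~768 KB: provokes the MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L)    // 8 KB: provokes the MEMSTORE_FLUSHSIZE warning
            .build();
        admin.createTable(td);
      }
    }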
2024-11-24T03:48:00,959 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41523 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:48:00,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41523 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T03:48:00,962 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T03:48:00,962 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:00,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41523 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-24T03:48:00,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41523 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T03:48:00,963 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T03:48:00,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40049 is added to blk_1073741835_1011 (size=395) 2024-11-24T03:48:00,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34135 is added to blk_1073741835_1011 (size=395) 2024-11-24T03:48:00,971 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 36d2c5fda6359ddcf1ae200b2895772c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef 2024-11-24T03:48:00,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40049 is added to blk_1073741836_1012 (size=78) 2024-11-24T03:48:00,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34135 is added to blk_1073741836_1012 (size=78) 2024-11-24T03:48:00,979 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:00,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 36d2c5fda6359ddcf1ae200b2895772c, disabling compactions & flushes 2024-11-24T03:48:00,979 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:00,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:00,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. after waiting 0 ms 2024-11-24T03:48:00,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:00,979 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:00,979 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 36d2c5fda6359ddcf1ae200b2895772c: Waiting for close lock at 1732420080979Disabling compacts and flushes for region at 1732420080979Disabling writes for close at 1732420080979Writing region close event to WAL at 1732420080979Closed at 1732420080979 2024-11-24T03:48:00,980 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T03:48:00,981 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732420080980"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732420080980"}]},"ts":"1732420080980"} 2024-11-24T03:48:00,983 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
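The Close-WAL-Writer WARNs that begin just below come from lease recovery on WAL files left by an earlier mini cluster (hdfs://localhost:44445, servers 43259 and 32933, rather than the current 35611). RecoverLeaseFSUtils keeps polling isFileClosed() through reflection, and every attempt fails with "Filesystem closed" because that cluster's DFSClient has already been shut down, so the same stack trace repeats roughly once per second per file. A minimal sketch of the underlying recover-then-poll pattern against HDFS (illustrative only; not the exact RecoverLeaseFSUtils code):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Illustrative sketch of the recover-then-poll pattern behind the WARNs below.
      static void recoverWalLease(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean closed = dfs.recoverLease(wal);  // ask the NameNode to begin lease recovery
        while (!closed) {
          Thread.sleep(1000L);                   // the log shows roughly one attempt per second
          closed = dfs.isFileClosed(wal);        // the call that throws "Filesystem closed"
        }                                        // once the backing DFSClient is shut down
      }
    }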
2024-11-24T03:48:00,985 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T03:48:00,985 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732420080985"}]},"ts":"1732420080985"} 2024-11-24T03:48:00,987 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-24T03:48:00,988 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=36d2c5fda6359ddcf1ae200b2895772c, ASSIGN}] 2024-11-24T03:48:00,989 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=36d2c5fda6359ddcf1ae200b2895772c, ASSIGN 2024-11-24T03:48:00,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:00,990 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=36d2c5fda6359ddcf1ae200b2895772c, ASSIGN; state=OFFLINE, location=71d8d2d6408d,42303,1732420079806; forceNewPlan=false, retain=false 2024-11-24T03:48:00,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:48:01,141 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=36d2c5fda6359ddcf1ae200b2895772c, regionState=OPENING, regionLocation=71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:01,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=36d2c5fda6359ddcf1ae200b2895772c, ASSIGN because future has completed 2024-11-24T03:48:01,146 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 36d2c5fda6359ddcf1ae200b2895772c, server=71d8d2d6408d,42303,1732420079806}] 2024-11-24T03:48:01,304 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:01,304 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 36d2c5fda6359ddcf1ae200b2895772c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:48:01,304 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,304 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:01,304 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,304 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,306 INFO [StoreOpener-36d2c5fda6359ddcf1ae200b2895772c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,308 INFO [StoreOpener-36d2c5fda6359ddcf1ae200b2895772c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36d2c5fda6359ddcf1ae200b2895772c columnFamilyName info 2024-11-24T03:48:01,308 DEBUG [StoreOpener-36d2c5fda6359ddcf1ae200b2895772c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:01,309 INFO [StoreOpener-36d2c5fda6359ddcf1ae200b2895772c-1 {}] regionserver.HStore(327): Store=36d2c5fda6359ddcf1ae200b2895772c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:48:01,309 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,310 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/default/TestLogRolling-testLogRollOnPipelineRestart/36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,311 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/default/TestLogRolling-testLogRollOnPipelineRestart/36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,312 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,312 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,314 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,318 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/default/TestLogRolling-testLogRollOnPipelineRestart/36d2c5fda6359ddcf1ae200b2895772c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:48:01,318 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 36d2c5fda6359ddcf1ae200b2895772c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860181, jitterRate=0.09377725422382355}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T03:48:01,318 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:01,319 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 36d2c5fda6359ddcf1ae200b2895772c: Running coprocessor pre-open hook at 1732420081305Writing region info on filesystem at 1732420081305Initializing all the Stores at 1732420081306 (+1 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420081306Cleaning up temporary data from old regions at 1732420081312 (+6 ms)Running coprocessor post-open hooks at 1732420081318 (+6 ms)Region opened successfully at 1732420081319 (+1 ms) 2024-11-24T03:48:01,320 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c., pid=6, masterSystemTime=1732420081299 2024-11-24T03:48:01,322 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:01,322 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:01,324 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=36d2c5fda6359ddcf1ae200b2895772c, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:01,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 36d2c5fda6359ddcf1ae200b2895772c, server=71d8d2d6408d,42303,1732420079806 because future has completed 2024-11-24T03:48:01,337 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T03:48:01,337 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 36d2c5fda6359ddcf1ae200b2895772c, server=71d8d2d6408d,42303,1732420079806 in 189 msec 2024-11-24T03:48:01,340 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T03:48:01,340 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=36d2c5fda6359ddcf1ae200b2895772c, ASSIGN in 350 msec 2024-11-24T03:48:01,341 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T03:48:01,341 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732420081341"}]},"ts":"1732420081341"} 2024-11-24T03:48:01,343 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-24T03:48:01,344 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T03:48:01,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 385 msec 2024-11-24T03:48:01,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:01,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:02,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:02,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:03,084 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T03:48:03,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:03,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:03,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:04,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:04,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:05,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:05,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:06,303 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T03:48:06,305 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-24T03:48:06,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:06,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-24T03:48:07,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-24T03:48:07,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-24T03:48:07,407 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-24T03:48:07,407 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-11-24T03:48:07,408 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-24T03:48:07,408 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-24T03:48:07,408 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-24T03:48:07,408 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-24T03:48:07,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:07,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:08,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:08,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:09,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:09,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:11,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:11,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-24T03:48:11,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41523 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-24T03:48:11,004 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-24T03:48:11,004 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-24T03:48:11,012 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-24T03:48:11,012 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c.
2024-11-24T03:48:11,016 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c., hostname=71d8d2d6408d,42303,1732420079806, seqNum=2]
2024-11-24T03:48:12,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:12,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:13,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:13,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-24T03:48:13,019 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452
2024-11-24T03:48:13,021 WARN [ResponseProcessor for block BP-711475820-172.17.0.2-1732420077663:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-711475820-172.17.0.2-1732420077663:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T03:48:13,020 WARN [ResponseProcessor for block BP-711475820-172.17.0.2-1732420077663:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-711475820-172.17.0.2-1732420077663:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T03:48:13,021 WARN [ResponseProcessor for block BP-711475820-172.17.0.2-1732420077663:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-711475820-172.17.0.2-1732420077663:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-24T03:48:13,022 WARN [DataStreamer for file /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 block BP-711475820-172.17.0.2-1732420077663:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-711475820-172.17.0.2-1732420077663:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40049,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK], DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40049,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK]) is bad.
2024-11-24T03:48:13,022 WARN [DataStreamer for file /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 block BP-711475820-172.17.0.2-1732420077663:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-711475820-172.17.0.2-1732420077663:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40049,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK], DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40049,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK]) is bad.
2024-11-24T03:48:13,022 WARN [DataStreamer for file /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta block BP-711475820-172.17.0.2-1732420077663:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-711475820-172.17.0.2-1732420077663:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40049,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK], DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40049,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK]) is bad.
2024-11-24T03:48:13,023 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1016917803_22 at /127.0.0.1:55910 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55910 dst: /127.0.0.1:40049
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:13,023 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1016917803_22 at /127.0.0.1:55898 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55898 dst: /127.0.0.1:40049 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:13,024 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1016917803_22 at /127.0.0.1:37456 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34135:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37456 dst: /127.0.0.1:34135 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:13,024 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1016917803_22 at /127.0.0.1:37460 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34135:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37460 dst: /127.0.0.1:34135 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:13,024 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-458193953_22 at /127.0.0.1:55876 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55876 dst: /127.0.0.1:40049 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:13,025 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-458193953_22 at /127.0.0.1:37426 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34135:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37426 dst: /127.0.0.1:34135 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T03:48:13,063 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32ad5d3f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:13,063 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3edd0360{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:48:13,063 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:48:13,064 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6fc46d5b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:48:13,064 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14c3623d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,STOPPED} 2024-11-24T03:48:13,065 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T03:48:13,065 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:48:13,065 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-711475820-172.17.0.2-1732420077663 (Datanode Uuid 0ccea49f-235e-49bb-a172-1d425c73d758) service to localhost/127.0.0.1:35611 2024-11-24T03:48:13,065 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:48:13,066 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data3/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:13,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data4/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:13,067 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:48:13,076 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:48:13,079 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:48:13,080 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:48:13,080 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:48:13,080 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:48:13,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6086fda4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:48:13,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66618ea8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:48:13,172 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17e23ece{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/java.io.tmpdir/jetty-localhost-41033-hadoop-hdfs-3_4_1-tests_jar-_-any-6193873843433634354/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:13,173 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ab8027a{HTTP/1.1, (http/1.1)}{localhost:41033} 2024-11-24T03:48:13,173 INFO [Time-limited test {}] server.Server(415): Started @172908ms 2024-11-24T03:48:13,174 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:48:13,191 WARN [ResponseProcessor for block BP-711475820-172.17.0.2-1732420077663:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-711475820-172.17.0.2-1732420077663:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:13,191 WARN [ResponseProcessor for block BP-711475820-172.17.0.2-1732420077663:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-711475820-172.17.0.2-1732420077663:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:13,191 WARN [ResponseProcessor for block BP-711475820-172.17.0.2-1732420077663:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-711475820-172.17.0.2-1732420077663:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:13,192 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1016917803_22 at /127.0.0.1:39642 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34135:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39642 dst: /127.0.0.1:34135 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T03:48:13,192 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1016917803_22 at /127.0.0.1:39628 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34135:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39628 dst: /127.0.0.1:34135 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:13,192 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-458193953_22 at /127.0.0.1:39644 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34135:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39644 dst: /127.0.0.1:34135 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:13,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14694414{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:13,196 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@567663d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:48:13,197 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:48:13,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a9e9581{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:48:13,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30acbc29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,STOPPED} 2024-11-24T03:48:13,198 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:48:13,198 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:48:13,198 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:48:13,198 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-711475820-172.17.0.2-1732420077663 (Datanode Uuid 5629a8ec-134d-4be7-b6e9-1ac3e524de47) service to localhost/127.0.0.1:35611 2024-11-24T03:48:13,199 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data1/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:13,199 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data2/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:13,199 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:48:13,208 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:48:13,214 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:48:13,215 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:48:13,215 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:48:13,215 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:48:13,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41c32822{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:48:13,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77d9e6f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:48:13,309 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@103ab168{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/java.io.tmpdir/jetty-localhost-45097-hadoop-hdfs-3_4_1-tests_jar-_-any-10175813947150153690/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:13,310 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6be1f89e{HTTP/1.1, 
(http/1.1)}{localhost:45097} 2024-11-24T03:48:13,310 INFO [Time-limited test {}] server.Server(415): Started @173045ms 2024-11-24T03:48:13,311 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:48:13,680 WARN [Thread-1334 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T03:48:13,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3f87fabbdc94125 with lease ID 0xae5b0f24c83c1296: from storage DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b node DatanodeRegistration(127.0.0.1:43907, datanodeUuid=0ccea49f-235e-49bb-a172-1d425c73d758, infoPort=41351, infoSecurePort=0, ipcPort=36643, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:13,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3f87fabbdc94125 with lease ID 0xae5b0f24c83c1296: from storage DS-751db3f8-b324-4f00-8918-2615bc885718 node DatanodeRegistration(127.0.0.1:43907, datanodeUuid=0ccea49f-235e-49bb-a172-1d425c73d758, infoPort=41351, infoSecurePort=0, ipcPort=36643, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:13,751 WARN [Thread-1354 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T03:48:13,754 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccef36cd013decb with lease ID 0xae5b0f24c83c1297: from storage DS-7c23efdc-9dc5-488f-8d77-731691371aa6 node DatanodeRegistration(127.0.0.1:36911, datanodeUuid=5629a8ec-134d-4be7-b6e9-1ac3e524de47, infoPort=32975, infoSecurePort=0, ipcPort=46179, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:13,754 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccef36cd013decb with lease ID 0xae5b0f24c83c1297: from storage DS-adf569e4-4af8-477f-8b82-607f950c86d0 node DatanodeRegistration(127.0.0.1:36911, datanodeUuid=5629a8ec-134d-4be7-b6e9-1ac3e524de47, infoPort=32975, infoSecurePort=0, ipcPort=46179, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:14,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:14,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:14,328 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-24T03:48:14,333 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-24T03:48:14,335 ERROR [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef-prefix:71d8d2d6408d,42303,1732420079806 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:14,335 WARN [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef-prefix:71d8d2d6408d,42303,1732420079806 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:48:14,335 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C42303%2C1732420079806:(num 1732420080452) roll requested 2024-11-24T03:48:14,336 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C42303%2C1732420079806.1732420094335 2024-11-24T03:48:14,342 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 newFile=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 2024-11-24T03:48:14,342 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:14,342 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:14,342 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:14,343 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:14,343 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:14,343 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 2024-11-24T03:48:14,343 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:14,344 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:48:14,344 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 2024-11-24T03:48:14,344 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32975:32975),(127.0.0.1/127.0.0.1:41351:41351)] 2024-11-24T03:48:14,344 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 is not closed yet, will try archiving it next time 2024-11-24T03:48:14,344 WARN [IPC Server handler 0 on default port 35611 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-24T03:48:14,344 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 after 0ms 2024-11-24T03:48:15,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:15,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:16,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:16,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:16,349 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-24T03:48:16,683 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T03:48:17,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:17,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:18,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:18,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:18,346 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 after 4002ms 2024-11-24T03:48:18,354 WARN [ResponseProcessor for block BP-711475820-172.17.0.2-1732420077663:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-711475820-172.17.0.2-1732420077663:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:18,355 WARN [DataStreamer for file /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 block BP-711475820-172.17.0.2-1732420077663:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-711475820-172.17.0.2-1732420077663:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36911,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK], DatanodeInfoWithStorage[127.0.0.1:43907,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36911,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]) is bad. 2024-11-24T03:48:18,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1016917803_22 at /127.0.0.1:41038 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41038 dst: /127.0.0.1:43907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:18,356 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1016917803_22 at /127.0.0.1:46966 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46966 dst: /127.0.0.1:36911 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
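The repeated util.RecoverLeaseFSUtils(258) warnings above all wrap the same root cause: the helper probes DistributedFileSystem#isFileClosed through reflection, and because the underlying DFSClient has already been shut down, every probe surfaces as an InvocationTargetException whose cause is "java.io.IOException: Filesystem closed". A minimal sketch of that reflective probe pattern, assuming only a Hadoop FileSystem handle; the class below is invented for illustration and is not the actual RecoverLeaseFSUtils code:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
      // Best-effort probe of isFileClosed(Path) via reflection. When the client is
      // already closed, invoke() throws InvocationTargetException whose cause is
      // "java.io.IOException: Filesystem closed", exactly as logged above.
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          return false; // this FileSystem implementation does not expose isFileClosed
        } catch (IllegalAccessException | InvocationTargetException e) {
          return false; // treat "Filesystem closed" and similar failures as "not closed yet"
        }
      }
    }
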
2024-11-24T03:48:18,395 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@103ab168{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:18,396 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6be1f89e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:48:18,396 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:48:18,396 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77d9e6f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:48:18,396 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41c32822{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,STOPPED} 2024-11-24T03:48:18,398 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T03:48:18,398 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:48:18,398 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-711475820-172.17.0.2-1732420077663 (Datanode Uuid 5629a8ec-134d-4be7-b6e9-1ac3e524de47) service to localhost/127.0.0.1:35611 2024-11-24T03:48:18,398 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:48:18,399 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data1/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:18,400 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data2/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:18,400 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:48:18,409 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:48:18,414 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:48:18,415 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:48:18,415 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:48:18,415 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:48:18,416 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444b0df5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:48:18,416 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1703ac9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:48:18,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ce34ef2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/java.io.tmpdir/jetty-localhost-36925-hadoop-hdfs-3_4_1-tests_jar-_-any-1589327872931389409/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:18,507 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@23d7111{HTTP/1.1, (http/1.1)}{localhost:36925} 2024-11-24T03:48:18,507 INFO [Time-limited test {}] server.Server(415): Started @178243ms 2024-11-24T03:48:18,508 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:48:18,527 WARN [ResponseProcessor for block BP-711475820-172.17.0.2-1732420077663:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-711475820-172.17.0.2-1732420077663:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:18,528 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1016917803_22 at /127.0.0.1:41054 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41054 dst: /127.0.0.1:43907 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:18,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17e23ece{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:18,539 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ab8027a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:48:18,539 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:48:18,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66618ea8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:48:18,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6086fda4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,STOPPED} 2024-11-24T03:48:18,541 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:48:18,541 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
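The block of entries above (Jetty handlers stopped, "Ending block pool service", interrupted refreshUsed threads) is one datanode of the mini cluster being torn down; a few entries later it comes back up and the test eventually logs "Data Nodes restarted". A rough sketch of that bounce against a MiniDFSCluster; the method names (stopDataNode, restartDataNode, waitActive) are the usual HDFS test APIs and are assumed here rather than taken from this test's source:

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

    final class DataNodeBounce {
      // Stop one datanode (ending its block pool service and web context, as logged
      // above), then restart it with the same storage directories and wait until
      // the NameNode has processed its block reports again.
      static void bounce(MiniDFSCluster cluster) throws Exception {
        DataNodeProperties dn = cluster.stopDataNode(0);
        cluster.restartDataNode(dn);
        cluster.waitActive();
      }
    }
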
2024-11-24T03:48:18,541 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:48:18,541 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-711475820-172.17.0.2-1732420077663 (Datanode Uuid 0ccea49f-235e-49bb-a172-1d425c73d758) service to localhost/127.0.0.1:35611 2024-11-24T03:48:18,541 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data3/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:18,542 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data4/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:18,542 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:48:18,555 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:48:18,560 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:48:18,560 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:48:18,560 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:48:18,560 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:48:18,561 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6db2908f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:48:18,561 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b5224f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:48:18,659 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49afdd6a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/java.io.tmpdir/jetty-localhost-44521-hadoop-hdfs-3_4_1-tests_jar-_-any-16248543090927225232/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:18,659 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@785865cb{HTTP/1.1, 
(http/1.1)}{localhost:44521} 2024-11-24T03:48:18,659 INFO [Time-limited test {}] server.Server(415): Started @178394ms 2024-11-24T03:48:18,660 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:48:18,934 WARN [Thread-1408 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T03:48:18,936 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66192218df56a8f2 with lease ID 0xae5b0f24c83c1298: from storage DS-7c23efdc-9dc5-488f-8d77-731691371aa6 node DatanodeRegistration(127.0.0.1:45421, datanodeUuid=5629a8ec-134d-4be7-b6e9-1ac3e524de47, infoPort=39921, infoSecurePort=0, ipcPort=45295, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T03:48:18,937 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66192218df56a8f2 with lease ID 0xae5b0f24c83c1298: from storage DS-adf569e4-4af8-477f-8b82-607f950c86d0 node DatanodeRegistration(127.0.0.1:45421, datanodeUuid=5629a8ec-134d-4be7-b6e9-1ac3e524de47, infoPort=39921, infoSecurePort=0, ipcPort=45295, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:19,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:19,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:19,046 WARN [Thread-1428 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:48:19,048 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x76a912ff179e094f with lease ID 0xae5b0f24c83c1299: from storage DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b node DatanodeRegistration(127.0.0.1:42297, datanodeUuid=0ccea49f-235e-49bb-a172-1d425c73d758, infoPort=35525, infoSecurePort=0, ipcPort=42525, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:19,048 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x76a912ff179e094f with lease ID 0xae5b0f24c83c1299: from storage DS-751db3f8-b324-4f00-8918-2615bc885718 node DatanodeRegistration(127.0.0.1:42297, datanodeUuid=0ccea49f-235e-49bb-a172-1d425c73d758, infoPort=35525, infoSecurePort=0, ipcPort=42525, storageInfo=lv=-57;cid=testClusterID;nsid=458153394;c=1732420077663), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:19,677 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-24T03:48:19,683 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-24T03:48:19,685 ERROR [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef-prefix:71d8d2d6408d,42303,1732420079806 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43907,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:19,685 WARN [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef-prefix:71d8d2d6408d,42303,1732420079806 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43907,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
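The ERROR/WARN pair directly above shows the append path giving up on the current writer once the whole pipeline is reported bad ("All datanodes ... are bad. Aborting..."), and the next entry shows the reaction: a roll of the WAL is requested so new appends go to a fresh file on a fresh pipeline. A hypothetical sketch of that reaction; the interface and method names below are invented for illustration and are not the HBase WAL API:

    import java.io.IOException;

    interface RollableWal {
      void append(byte[] edit) throws IOException;
      void sync() throws IOException;
      void requestRoll(); // open a new writer; the old file is closed and lease-recovered
    }

    final class WalAppender {
      // On a pipeline failure the broken writer is not retried; a roll is requested
      // instead, matching the "appendAndSync throws IOException" -> "roll requested"
      // sequence in the log.
      static void appendAndSync(RollableWal wal, byte[] edit) {
        try {
          wal.append(edit);
          wal.sync();
        } catch (IOException e) {
          wal.requestRoll();
        }
      }
    }
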
2024-11-24T03:48:19,685 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C42303%2C1732420079806:(num 1732420094335) roll requested 2024-11-24T03:48:19,685 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C42303%2C1732420079806.1732420099685 2024-11-24T03:48:19,695 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 newFile=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420099685 2024-11-24T03:48:19,695 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:19,695 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:19,695 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:19,695 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:19,695 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:19,696 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420099685 2024-11-24T03:48:19,696 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43907,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:19,696 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43907,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
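When closing the old writer fails as above, the file is handed to lease recovery: the entries that follow (and the earlier "Recovered lease, attempt=1 ... after 4002ms") show recoverLease being retried until the NameNode reports the file closed. A minimal sketch of that recover-and-retry loop against DistributedFileSystem; the real helper also polls isFileClosed between attempts and uses its own timeouts:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class WalLeaseRecovery {
      // recoverLease() returns true once the NameNode has closed the file; until
      // then it kicks off block recovery (the "RecoveryId = ..." lines above) and
      // the caller waits and tries again.
      static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
        int attempt = 0;
        while (!dfs.recoverLease(wal)) {
          long backoffMs = 1000L << Math.min(attempt++, 2); // 1s, 2s, then 4s per retry
          Thread.sleep(backoffMs);
        }
      }
    }
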
2024-11-24T03:48:19,696 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 2024-11-24T03:48:19,696 WARN [IPC Server handler 3 on default port 35611 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-24T03:48:19,697 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 after 1ms 2024-11-24T03:48:19,699 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39921:39921),(127.0.0.1/127.0.0.1:35525:35525)] 2024-11-24T03:48:19,699 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 is not closed yet, will try archiving it next time 2024-11-24T03:48:20,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:20,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:21,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:21,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:21,701 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C42303%2C1732420079806.1732420101701 2024-11-24T03:48:21,707 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420099685 newFile=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 2024-11-24T03:48:21,707 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:21,707 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:21,707 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:21,707 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:21,707 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:21,708 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420099685 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 2024-11-24T03:48:21,709 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39921:39921),(127.0.0.1/127.0.0.1:35525:35525)] 2024-11-24T03:48:21,709 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 is not closed yet, will try archiving it next time 2024-11-24T03:48:21,709 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420099685 is not closed yet, will try archiving it next time 2024-11-24T03:48:21,710 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 2024-11-24T03:48:21,710 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 2024-11-24T03:48:21,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741838_1019 (size=1264) 2024-11-24T03:48:21,710 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): 
Recovered lease, attempt=0 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 after 0ms 2024-11-24T03:48:21,710 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 2024-11-24T03:48:21,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741838_1019 (size=1264) 2024-11-24T03:48:21,717 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 is not closed yet, will try archiving it next time 2024-11-24T03:48:21,730 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732420081319/Put/vlen=218/seqid=0] 2024-11-24T03:48:21,731 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732420091017/Put/vlen=1045/seqid=0] 2024-11-24T03:48:21,731 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420080452 2024-11-24T03:48:21,731 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 2024-11-24T03:48:21,731 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 2024-11-24T03:48:21,731 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 after 0ms 2024-11-24T03:48:21,732 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 2024-11-24T03:48:21,736 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732420094335/Put/vlen=1045/seqid=0] 2024-11-24T03:48:21,736 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732420096351/Put/vlen=1045/seqid=0] 2024-11-24T03:48:21,737 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 2024-11-24T03:48:21,737 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420099685 2024-11-24T03:48:21,737 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420099685 2024-11-24T03:48:21,737 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420099685 after 0ms 2024-11-24T03:48:21,737 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420099685 2024-11-24T03:48:21,744 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732420099685/Put/vlen=1045/seqid=0] 2024-11-24T03:48:21,744 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 2024-11-24T03:48:21,744 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 2024-11-24T03:48:21,745 WARN [IPC Server handler 4 on default port 35611 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-24T03:48:21,745 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 after 1ms 2024-11-24T03:48:22,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:22,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:48:22,056 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-458193953_22 at /127.0.0.1:52898 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45421:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52898 dst: /127.0.0.1:45421 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45421 remote=/127.0.0.1:52898]. Total timeout mills is 60000, 59651 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:48:22,056 WARN [ResponseProcessor for block BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:48:22,056 WARN [DataStreamer for file /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 block BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45421,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK], DatanodeInfoWithStorage[127.0.0.1:42297,DS-cfc90098-62b0-4aa4-94ab-64556f7e3a5b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45421,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]) is bad. 2024-11-24T03:48:22,056 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-458193953_22 at /127.0.0.1:60964 [Receiving block BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42297:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60964 dst: /127.0.0.1:42297 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T03:48:22,057 WARN [DataStreamer for file /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 block BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:48:22,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741839_1022 (size=85) 2024-11-24T03:48:22,936 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T03:48:23,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:23,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:23,698 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420094335 after 4002ms 2024-11-24T03:48:24,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:24,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:25,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:25,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:25,746 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 after 4002ms 2024-11-24T03:48:25,746 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 2024-11-24T03:48:25,751 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 2024-11-24T03:48:25,751 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-24T03:48:25,752 ERROR [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef-prefix:71d8d2d6408d,42303,1732420079806.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:25,752 WARN [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef-prefix:71d8d2d6408d,42303,1732420079806.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:25,752 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C42303%2C1732420079806.meta:.meta(num 1732420080825) roll requested 2024-11-24T03:48:25,753 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C42303%2C1732420079806.meta.1732420105752.meta 2024-11-24T03:48:25,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,760 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,760 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420105752.meta 2024-11-24T03:48:25,760 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:25,760 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:25,760 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta 2024-11-24T03:48:25,761 WARN [IPC Server handler 3 on default port 35611 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013 2024-11-24T03:48:25,761 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta after 0ms 2024-11-24T03:48:25,776 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35525:35525),(127.0.0.1/127.0.0.1:39921:39921)] 2024-11-24T03:48:25,776 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta is not closed yet, will try archiving it next time 2024-11-24T03:48:25,799 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/.tmp/info/f5810a6b469d4f6d884c22f3b06be21f is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c./info:regioninfo/1732420081323/Put/seqid=0 2024-11-24T03:48:25,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741841_1025 (size=7125) 2024-11-24T03:48:25,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741841_1025 (size=7125) 2024-11-24T03:48:25,809 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/.tmp/info/f5810a6b469d4f6d884c22f3b06be21f 2024-11-24T03:48:25,837 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/.tmp/ns/8fcb55a57c1749349bc533a620f6125d is 43, key is default/ns:d/1732420080913/Put/seqid=0 2024-11-24T03:48:25,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42297 is added to blk_1073741842_1026 (size=5153) 2024-11-24T03:48:25,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741842_1026 (size=5153) 2024-11-24T03:48:25,845 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/.tmp/ns/8fcb55a57c1749349bc533a620f6125d 2024-11-24T03:48:25,875 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/.tmp/table/88cbff5a27414e9198c3d7e537fde193 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732420081341/Put/seqid=0 2024-11-24T03:48:25,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741843_1027 (size=5438) 2024-11-24T03:48:25,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741843_1027 (size=5438) 2024-11-24T03:48:25,892 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/.tmp/table/88cbff5a27414e9198c3d7e537fde193 2024-11-24T03:48:25,909 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/.tmp/info/f5810a6b469d4f6d884c22f3b06be21f as hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/info/f5810a6b469d4f6d884c22f3b06be21f 2024-11-24T03:48:25,919 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/info/f5810a6b469d4f6d884c22f3b06be21f, entries=10, sequenceid=11, filesize=7.0 K 2024-11-24T03:48:25,920 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/.tmp/ns/8fcb55a57c1749349bc533a620f6125d as hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/ns/8fcb55a57c1749349bc533a620f6125d 2024-11-24T03:48:25,928 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/ns/8fcb55a57c1749349bc533a620f6125d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T03:48:25,930 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/.tmp/table/88cbff5a27414e9198c3d7e537fde193 as hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/table/88cbff5a27414e9198c3d7e537fde193 2024-11-24T03:48:25,937 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/table/88cbff5a27414e9198c3d7e537fde193, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T03:48:25,939 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 188ms, sequenceid=11, compaction requested=false 2024-11-24T03:48:25,939 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T03:48:25,939 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 36d2c5fda6359ddcf1ae200b2895772c 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-24T03:48:25,940 ERROR [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef-prefix:71d8d2d6408d,42303,1732420079806 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:25,940 WARN [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef-prefix:71d8d2d6408d,42303,1732420079806 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:25,940 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C42303%2C1732420079806:(num 1732420101701) roll requested 2024-11-24T03:48:25,941 INFO [regionserver/71d8d2d6408d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C42303%2C1732420079806.1732420105941 2024-11-24T03:48:25,947 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 newFile=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420105941 2024-11-24T03:48:25,947 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,947 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,947 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,947 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,947 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:25,948 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420105941 2024-11-24T03:48:25,948 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:25,948 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-711475820-172.17.0.2-1732420077663:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:48:25,949 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701
2024-11-24T03:48:25,949 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 after 0ms
2024-11-24T03:48:25,955 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.1732420101701 to hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/oldWALs/71d8d2d6408d%2C42303%2C1732420079806.1732420101701
2024-11-24T03:48:25,963 DEBUG [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39921:39921),(127.0.0.1/127.0.0.1:35525:35525)]
2024-11-24T03:48:25,984 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/default/TestLogRolling-testLogRollOnPipelineRestart/36d2c5fda6359ddcf1ae200b2895772c/.tmp/info/cdddfbda2eac49859de7b7ea190bc2f8 is 1080, key is row1002/info:/1732420091017/Put/seqid=0
2024-11-24T03:48:26,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741845_1029 (size=9270)
2024-11-24T03:48:26,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741845_1029 (size=9270)
2024-11-24T03:48:26,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:26,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-24T03:48:26,406 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/default/TestLogRolling-testLogRollOnPipelineRestart/36d2c5fda6359ddcf1ae200b2895772c/.tmp/info/cdddfbda2eac49859de7b7ea190bc2f8
2024-11-24T03:48:26,414 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/default/TestLogRolling-testLogRollOnPipelineRestart/36d2c5fda6359ddcf1ae200b2895772c/.tmp/info/cdddfbda2eac49859de7b7ea190bc2f8 as hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/default/TestLogRolling-testLogRollOnPipelineRestart/36d2c5fda6359ddcf1ae200b2895772c/info/cdddfbda2eac49859de7b7ea190bc2f8
2024-11-24T03:48:26,421 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/default/TestLogRolling-testLogRollOnPipelineRestart/36d2c5fda6359ddcf1ae200b2895772c/info/cdddfbda2eac49859de7b7ea190bc2f8, entries=4, sequenceid=8, filesize=9.1 K
2024-11-24T03:48:26,423 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 36d2c5fda6359ddcf1ae200b2895772c in 484ms, sequenceid=8, compaction requested=false
2024-11-24T03:48:26,423 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 36d2c5fda6359ddcf1ae200b2895772c:
2024-11-24T03:48:26,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-24T03:48:26,430 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-24T03:48:26,431 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:48:26,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:48:26,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:48:26,431 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T03:48:26,431 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=89907425, stopped=false 2024-11-24T03:48:26,431 
INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T03:48:26,431 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=71d8d2d6408d,41523,1732420079646 2024-11-24T03:48:26,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:48:26,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:48:26,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:26,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:26,484 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:48:26,484 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T03:48:26,484 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:48:26,484 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:48:26,484 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:48:26,484 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '71d8d2d6408d,42303,1732420079806' ***** 2024-11-24T03:48:26,485 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T03:48:26,485 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T03:48:26,485 INFO [RS:0;71d8d2d6408d:42303 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T03:48:26,485 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T03:48:26,485 INFO [RS:0;71d8d2d6408d:42303 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T03:48:26,485 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:48:26,485 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(3091): Received CLOSE for 36d2c5fda6359ddcf1ae200b2895772c 2024-11-24T03:48:26,485 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(959): stopping server 71d8d2d6408d,42303,1732420079806 2024-11-24T03:48:26,485 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:48:26,485 INFO [RS:0;71d8d2d6408d:42303 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;71d8d2d6408d:42303. 
2024-11-24T03:48:26,486 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 36d2c5fda6359ddcf1ae200b2895772c, disabling compactions & flushes 2024-11-24T03:48:26,486 DEBUG [RS:0;71d8d2d6408d:42303 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:48:26,486 DEBUG [RS:0;71d8d2d6408d:42303 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:48:26,486 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:26,486 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:26,486 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T03:48:26,486 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T03:48:26,486 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. after waiting 0 ms 2024-11-24T03:48:26,486 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T03:48:26,486 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 
2024-11-24T03:48:26,486 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-24T03:48:26,486 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-24T03:48:26,486 DEBUG [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 36d2c5fda6359ddcf1ae200b2895772c=TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c.}
2024-11-24T03:48:26,486 DEBUG [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 36d2c5fda6359ddcf1ae200b2895772c
2024-11-24T03:48:26,486 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-24T03:48:26,486 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-24T03:48:26,486 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-24T03:48:26,486 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-24T03:48:26,486 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-24T03:48:26,491 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/default/TestLogRolling-testLogRollOnPipelineRestart/36d2c5fda6359ddcf1ae200b2895772c/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1
2024-11-24T03:48:26,491 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-24T03:48:26,492 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c.
2024-11-24T03:48:26,492 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:48:26,492 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 36d2c5fda6359ddcf1ae200b2895772c: Waiting for close lock at 1732420106485Running coprocessor pre-close hooks at 1732420106485Disabling compacts and flushes for region at 1732420106485Disabling writes for close at 1732420106486 (+1 ms)Writing region close event to WAL at 1732420106487 (+1 ms)Running coprocessor post-close hooks at 1732420106491 (+4 ms)Closed at 1732420106492 (+1 ms) 2024-11-24T03:48:26,492 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:48:26,492 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732420080959.36d2c5fda6359ddcf1ae200b2895772c. 2024-11-24T03:48:26,492 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420106486Running coprocessor pre-close hooks at 1732420106486Disabling compacts and flushes for region at 1732420106486Disabling writes for close at 1732420106486Writing region close event to WAL at 1732420106488 (+2 ms)Running coprocessor post-close hooks at 1732420106492 (+4 ms)Closed at 1732420106492 2024-11-24T03:48:26,492 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T03:48:26,686 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(976): stopping server 71d8d2d6408d,42303,1732420079806; all regions closed. 2024-11-24T03:48:26,687 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:26,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:26,687 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:26,688 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:26,688 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:26,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741840_1023 (size=825) 2024-11-24T03:48:26,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741840_1023 (size=825) 2024-11-24T03:48:27,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:27,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:27,312 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T03:48:27,312 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T03:48:27,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:48:27,407 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T03:48:27,407 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T03:48:28,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:28,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:28,315 INFO [regionserver/71d8d2d6408d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:48:29,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:29,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:29,052 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T03:48:29,625 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-24T03:48:29,762 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta after 4001ms
2024-11-24T03:48:29,763 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/WALs/71d8d2d6408d,42303,1732420079806/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta to hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/oldWALs/71d8d2d6408d%2C42303%2C1732420079806.meta.1732420080825.meta
2024-11-24T03:48:29,767 DEBUG [RS:0;71d8d2d6408d:42303 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/oldWALs
2024-11-24T03:48:29,767 INFO [RS:0;71d8d2d6408d:42303 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C42303%2C1732420079806.meta:.meta(num 1732420105752)
2024-11-24T03:48:29,767 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:48:29,767 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:48:29,767 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:48:29,768 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:48:29,768 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:48:29,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741844_1028 (size=1162)
2024-11-24T03:48:29,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741844_1028 (size=1162)
2024-11-24T03:48:29,776 DEBUG [RS:0;71d8d2d6408d:42303 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/oldWALs
2024-11-24T03:48:29,776 INFO [RS:0;71d8d2d6408d:42303 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C42303%2C1732420079806:(num 1732420105941)
2024-11-24T03:48:29,776 DEBUG [RS:0;71d8d2d6408d:42303 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-24T03:48:29,776 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.LeaseManager(133): Closed leases
2024-11-24T03:48:29,776 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-24T03:48:29,777 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.ChoreService(370): Chore service for: regionserver/71d8d2d6408d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-24T03:48:29,777 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-24T03:48:29,777 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-24T03:48:29,777 INFO [RS:0;71d8d2d6408d:42303 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42303
2024-11-24T03:48:29,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-24T03:48:29,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/71d8d2d6408d,42303,1732420079806
2024-11-24T03:48:29,808 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-24T03:48:29,816 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [71d8d2d6408d,42303,1732420079806]
2024-11-24T03:48:29,824 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/71d8d2d6408d,42303,1732420079806 already deleted, retry=false
2024-11-24T03:48:29,825 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 71d8d2d6408d,42303,1732420079806 expired; onlineServers=0
2024-11-24T03:48:29,825 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '71d8d2d6408d,41523,1732420079646' *****
2024-11-24T03:48:29,825 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-24T03:48:29,825 INFO [M:0;71d8d2d6408d:41523 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-24T03:48:29,825 INFO [M:0;71d8d2d6408d:41523 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-24T03:48:29,825 DEBUG [M:0;71d8d2d6408d:41523 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-24T03:48:29,825 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-24T03:48:29,825 DEBUG [M:0;71d8d2d6408d:41523 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T03:48:29,825 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420080183 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420080183,5,FailOnTimeoutGroup] 2024-11-24T03:48:29,825 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420080183 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420080183,5,FailOnTimeoutGroup] 2024-11-24T03:48:29,826 INFO [M:0;71d8d2d6408d:41523 {}] hbase.ChoreService(370): Chore service for: master/71d8d2d6408d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T03:48:29,826 INFO [M:0;71d8d2d6408d:41523 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:48:29,826 DEBUG [M:0;71d8d2d6408d:41523 {}] master.HMaster(1795): Stopping service threads 2024-11-24T03:48:29,826 INFO [M:0;71d8d2d6408d:41523 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T03:48:29,826 INFO [M:0;71d8d2d6408d:41523 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:48:29,826 INFO [M:0;71d8d2d6408d:41523 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T03:48:29,826 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T03:48:29,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T03:48:29,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:29,833 DEBUG [M:0;71d8d2d6408d:41523 {}] zookeeper.ZKUtil(347): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T03:48:29,833 WARN [M:0;71d8d2d6408d:41523 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T03:48:29,834 INFO [M:0;71d8d2d6408d:41523 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/.lastflushedseqids 2024-11-24T03:48:29,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741846_1030 (size=120) 2024-11-24T03:48:29,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741846_1030 (size=120) 2024-11-24T03:48:29,841 INFO [M:0;71d8d2d6408d:41523 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T03:48:29,841 INFO [M:0;71d8d2d6408d:41523 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T03:48:29,841 DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:48:29,841 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:48:29,841 DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:48:29,841 DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:48:29,841 DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:48:29,841 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-24T03:48:29,842 ERROR [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData-prefix:71d8d2d6408d,41523,1732420079646 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:29,842 WARN [FSHLog-0-hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData-prefix:71d8d2d6408d,41523,1732420079646 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:48:29,842 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 71d8d2d6408d%2C41523%2C1732420079646:(num 1732420079958) roll requested 2024-11-24T03:48:29,842 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C41523%2C1732420079646.1732420109842 2024-11-24T03:48:29,849 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:29,849 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:29,849 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:29,849 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:29,849 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:29,850 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420109842 2024-11-24T03:48:29,850 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T03:48:29,850 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34135,DS-7c23efdc-9dc5-488f-8d77-731691371aa6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T03:48:29,850 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 2024-11-24T03:48:29,850 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35525:35525),(127.0.0.1/127.0.0.1:39921:39921)] 2024-11-24T03:48:29,850 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 is not closed yet, will try archiving it next time 2024-11-24T03:48:29,850 WARN [IPC Server handler 3 on default port 35611 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-24T03:48:29,851 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 after 1ms 2024-11-24T03:48:29,866 DEBUG [M:0;71d8d2d6408d:41523 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8d248b590fda4c489eeeaa8b1b8abeb8 is 82, key is hbase:meta,,1/info:regioninfo/1732420080851/Put/seqid=0 2024-11-24T03:48:29,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741848_1033 (size=5672) 2024-11-24T03:48:29,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741848_1033 (size=5672) 2024-11-24T03:48:29,871 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8d248b590fda4c489eeeaa8b1b8abeb8 2024-11-24T03:48:29,893 DEBUG [M:0;71d8d2d6408d:41523 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/15969415491f4d7aa8c581e4e92e37ca is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732420081345/Put/seqid=0 2024-11-24T03:48:29,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741849_1034 (size=6119) 2024-11-24T03:48:29,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741849_1034 (size=6119) 2024-11-24T03:48:29,901 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/15969415491f4d7aa8c581e4e92e37ca 2024-11-24T03:48:29,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:48:29,916 INFO [RS:0;71d8d2d6408d:42303 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:48:29,916 INFO [RS:0;71d8d2d6408d:42303 {}] regionserver.HRegionServer(1031): Exiting; stopping=71d8d2d6408d,42303,1732420079806; zookeeper connection closed. 2024-11-24T03:48:29,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42303-0x1016c3e22d20001, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:48:29,917 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68e38fa5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68e38fa5 2024-11-24T03:48:29,917 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T03:48:29,921 DEBUG [M:0;71d8d2d6408d:41523 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7cf567e38c4243129baa567649a4f8fe is 69, key is 71d8d2d6408d,42303,1732420079806/rs:state/1732420080285/Put/seqid=0 2024-11-24T03:48:29,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741850_1035 (size=5156) 2024-11-24T03:48:29,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741850_1035 (size=5156) 2024-11-24T03:48:29,927 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7cf567e38c4243129baa567649a4f8fe 2024-11-24T03:48:29,949 DEBUG [M:0;71d8d2d6408d:41523 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bae5f434cbdc48d1aef0fbdca733c84f is 52, key is load_balancer_on/state:d/1732420080951/Put/seqid=0 2024-11-24T03:48:29,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741851_1036 (size=5056) 2024-11-24T03:48:29,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741851_1036 (size=5056) 2024-11-24T03:48:29,956 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bae5f434cbdc48d1aef0fbdca733c84f 2024-11-24T03:48:29,962 DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8d248b590fda4c489eeeaa8b1b8abeb8 as hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8d248b590fda4c489eeeaa8b1b8abeb8 2024-11-24T03:48:29,968 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8d248b590fda4c489eeeaa8b1b8abeb8, entries=8, sequenceid=56, filesize=5.5 K 2024-11-24T03:48:29,969 DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/15969415491f4d7aa8c581e4e92e37ca as hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/15969415491f4d7aa8c581e4e92e37ca 2024-11-24T03:48:29,976 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/15969415491f4d7aa8c581e4e92e37ca, entries=6, sequenceid=56, filesize=6.0 K 2024-11-24T03:48:29,977 DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7cf567e38c4243129baa567649a4f8fe as hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7cf567e38c4243129baa567649a4f8fe 2024-11-24T03:48:29,984 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7cf567e38c4243129baa567649a4f8fe, entries=1, sequenceid=56, filesize=5.0 K 2024-11-24T03:48:29,985 DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bae5f434cbdc48d1aef0fbdca733c84f as hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bae5f434cbdc48d1aef0fbdca733c84f 2024-11-24T03:48:29,992 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bae5f434cbdc48d1aef0fbdca733c84f, entries=1, sequenceid=56, filesize=4.9 K 2024-11-24T03:48:29,993 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=56, compaction requested=false 2024-11-24T03:48:30,011 INFO [M:0;71d8d2d6408d:41523 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
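The HRegionFileSystem(442)/HStore entries above trace the store-flush commit pattern: each flushed file is first written under the store's .tmp directory and only then published by a rename into the live column-family directory. A minimal sketch of that "write to .tmp, then commit by rename" pattern against a generic Hadoop FileSystem follows; class, method, and path names here are illustrative assumptions, not the actual HBase internals.

// Sketch of the flush commit pattern visible in the log: data is staged under
// .tmp and exposed to readers only via the final rename. Names are hypothetical.
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static Path flushAndCommit(FileSystem fs, Path storeDir, String fileName, byte[] payload)
      throws IOException {
    Path tmpFile = new Path(new Path(storeDir, ".tmp"), fileName);
    Path finalFile = new Path(storeDir, fileName);

    // 1. Write the flushed data under the store's .tmp directory first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);
      out.hflush(); // make the bytes durable before publishing the file
    }

    // 2. Commit: move the finished file from .tmp into the live store directory,
    //    so readers only ever observe complete files.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
    return finalFile;
  }
}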
2024-11-24T03:48:30,011 DEBUG [M:0;71d8d2d6408d:41523 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420109841Disabling compacts and flushes for region at 1732420109841Disabling writes for close at 1732420109841Obtaining lock to block concurrent updates at 1732420109841Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732420109841Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732420109842 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732420109851 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732420109851Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732420109866 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732420109866Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732420109876 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732420109892 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732420109892Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732420109906 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732420109921 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732420109921Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732420109931 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732420109948 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732420109949 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6da40f07: reopening flushed file at 1732420109961 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b484b28: reopening flushed file at 1732420109968 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bc6f42a: reopening flushed file at 1732420109976 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28e6c5b6: reopening flushed file at 1732420109984 (+8 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=56, compaction requested=false at 1732420109993 (+9 ms)Writing region close event to WAL at 1732420110011 (+18 ms)Closed at 1732420110011 2024-11-24T03:48:30,012 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:30,012 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:30,012 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:30,012 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:30,012 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:48:30,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45421 is added to blk_1073741847_1031 (size=757) 2024-11-24T03:48:30,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42297 is added to blk_1073741847_1031 (size=757) 2024-11-24T03:48:30,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:30,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:31,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:48:31,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:31,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,493 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:31,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:32,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:32,029 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T03:48:32,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,051 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T03:48:32,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:32,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:33,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:33,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:33,852 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 after 4001ms 2024-11-24T03:48:33,852 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/WALs/71d8d2d6408d,41523,1732420079646/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 to hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/oldWALs/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 2024-11-24T03:48:33,857 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/MasterData/oldWALs/71d8d2d6408d%2C41523%2C1732420079646.1732420079958 to hdfs://localhost:35611/user/jenkins/test-data/f6d6bedf-9923-91a1-043b-af22d31e7eef/oldWALs/71d8d2d6408d%2C41523%2C1732420079646.1732420079958$masterlocalwal$ 2024-11-24T03:48:33,857 INFO [M:0;71d8d2d6408d:41523 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T03:48:33,857 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
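The RecoverLeaseFSUtils entries above (attempt=0 failing after 1ms, attempt=1 succeeding after roughly 4 seconds) reflect a poll-and-retry loop around DistributedFileSystem.recoverLease on the old WAL file. The sketch below shows that retry pattern in outline only; the pause value and method shape are assumptions for illustration and are not the actual HBase utility code.

// Sketch of the lease-recovery retry loop suggested by the log: ask the NameNode
// to recover the previous writer's lease, and poll until it reports the file closed.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void recoverLease(DistributedFileSystem dfs, Path walFile, long pauseMs)
      throws IOException, InterruptedException {
    int attempt = 0;
    long start = System.currentTimeMillis();
    while (true) {
      // recoverLease() returns true once the lease is released and the file is closed;
      // false means recovery is still in progress on the NameNode side.
      if (dfs.recoverLease(walFile)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + walFile
            + " after " + (System.currentTimeMillis() - start) + "ms");
        return;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt + " on file=" + walFile);
      attempt++;
      Thread.sleep(pauseMs); // back off before re-checking (the log shows ~4s between attempts)
    }
  }
}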
2024-11-24T03:48:33,857 INFO [M:0;71d8d2d6408d:41523 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41523 2024-11-24T03:48:33,857 INFO [M:0;71d8d2d6408d:41523 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:48:34,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:48:34,007 INFO [M:0;71d8d2d6408d:41523 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:48:34,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41523-0x1016c3e22d20000, quorum=127.0.0.1:60504, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:48:34,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49afdd6a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:34,012 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@785865cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:48:34,012 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:48:34,012 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b5224f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:48:34,012 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6db2908f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,STOPPED} 2024-11-24T03:48:34,014 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:48:34,014 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:48:34,014 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:48:34,015 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-711475820-172.17.0.2-1732420077663 (Datanode Uuid 0ccea49f-235e-49bb-a172-1d425c73d758) service to localhost/127.0.0.1:35611 2024-11-24T03:48:34,015 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data3/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:34,016 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data4/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:34,016 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:48:34,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ce34ef2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:34,018 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23d7111{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:48:34,018 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:48:34,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1703ac9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:48:34,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444b0df5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,STOPPED} 2024-11-24T03:48:34,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:34,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:34,020 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T03:48:34,020 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:48:34,020 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:48:34,020 WARN [BP-711475820-172.17.0.2-1732420077663 heartbeating to localhost/127.0.0.1:35611 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-711475820-172.17.0.2-1732420077663 (Datanode Uuid 5629a8ec-134d-4be7-b6e9-1ac3e524de47) service to localhost/127.0.0.1:35611 2024-11-24T03:48:34,020 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data1/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:34,021 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/cluster_37a1f28f-c881-58ac-7b90-3a8e8bfa78a1/data/data2/current/BP-711475820-172.17.0.2-1732420077663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:48:34,021 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:48:34,059 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b9a91a6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:48:34,059 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a096cfb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:48:34,060 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:48:34,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e4608e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:48:34,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@426efeef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir/,STOPPED} 2024-11-24T03:48:34,070 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): 
Shutdown MiniZK cluster with all ZK servers 2024-11-24T03:48:34,088 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T03:48:34,096 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35611 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:35611 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:35611 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35611 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:35611 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:35611 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35611 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35611 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=195 (was 193) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6874 (was 7466) 2024-11-24T03:48:34,104 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=195, ProcessCount=11, AvailableMemoryMB=6874 2024-11-24T03:48:34,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T03:48:34,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.log.dir so I do NOT create it in target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a 2024-11-24T03:48:34,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bf54131-1488-b830-baa2-4e549a57d3b1/hadoop.tmp.dir so I do NOT create it in target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a 2024-11-24T03:48:34,105 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13, deleteOnExit=true 2024-11-24T03:48:34,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T03:48:34,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/test.cache.data in system properties and HBase conf 2024-11-24T03:48:34,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T03:48:34,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.log.dir in system properties and HBase conf 2024-11-24T03:48:34,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T03:48:34,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/mapreduce.cluster.temp.dir in 
system properties and HBase conf 2024-11-24T03:48:34,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T03:48:34,105 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T03:48:34,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/nfs.dump.dir 
in system properties and HBase conf 2024-11-24T03:48:34,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/java.io.tmpdir in system properties and HBase conf 2024-11-24T03:48:34,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:48:34,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T03:48:34,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T03:48:34,123 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:48:34,410 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:48:34,416 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:48:34,417 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:48:34,417 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:48:34,417 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:48:34,418 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:48:34,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@153be6d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:48:34,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@675e37f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:48:34,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38b36302{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/java.io.tmpdir/jetty-localhost-44599-hadoop-hdfs-3_4_1-tests_jar-_-any-10445713981430131869/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:48:34,545 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40cc7ce8{HTTP/1.1, (http/1.1)}{localhost:44599} 2024-11-24T03:48:34,545 INFO [Time-limited test {}] server.Server(415): Started @194281ms 2024-11-24T03:48:34,558 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:48:34,756 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:48:34,759 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:48:34,760 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:48:34,760 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:48:34,760 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:48:34,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@724d8870{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:48:34,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e17225c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:48:34,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58200445{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/java.io.tmpdir/jetty-localhost-34481-hadoop-hdfs-3_4_1-tests_jar-_-any-2394372444388354703/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:34,864 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@403776ad{HTTP/1.1, (http/1.1)}{localhost:34481} 2024-11-24T03:48:34,864 INFO [Time-limited test {}] server.Server(415): Started @194599ms 2024-11-24T03:48:34,865 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:48:34,913 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:48:34,916 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:48:34,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:48:34,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:48:34,917 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:48:34,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c78c403{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:48:34,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50c8efd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:48:35,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-24T03:48:35,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:35,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5559c5a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/java.io.tmpdir/jetty-localhost-39707-hadoop-hdfs-3_4_1-tests_jar-_-any-501889115537265446/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:48:35,033 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25901f18{HTTP/1.1, (http/1.1)}{localhost:39707} 2024-11-24T03:48:35,033 INFO [Time-limited test {}] server.Server(415): Started @194768ms 2024-11-24T03:48:35,034 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-24T03:48:35,684 WARN [Thread-1648 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/data/data1/current/BP-1531438035-172.17.0.2-1732420114136/current, will proceed with Du for space computation calculation, 2024-11-24T03:48:35,691 WARN [Thread-1649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/data/data2/current/BP-1531438035-172.17.0.2-1732420114136/current, will proceed with Du for space computation calculation, 2024-11-24T03:48:35,712 WARN [Thread-1612 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T03:48:35,715 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf35998e5e4999f0e with lease ID 0xd2f8290ec7ec9b37: Processing first storage report for DS-177deeaa-1aec-4c36-ac1d-424c7be22dab from datanode DatanodeRegistration(127.0.0.1:44679, datanodeUuid=f865cced-d508-4799-9a1a-385e66b7bf18, infoPort=41241, infoSecurePort=0, ipcPort=42927, storageInfo=lv=-57;cid=testClusterID;nsid=55833844;c=1732420114136) 2024-11-24T03:48:35,715 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf35998e5e4999f0e with lease ID 0xd2f8290ec7ec9b37: from storage DS-177deeaa-1aec-4c36-ac1d-424c7be22dab node DatanodeRegistration(127.0.0.1:44679, datanodeUuid=f865cced-d508-4799-9a1a-385e66b7bf18, infoPort=41241, infoSecurePort=0, ipcPort=42927, storageInfo=lv=-57;cid=testClusterID;nsid=55833844;c=1732420114136), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:35,715 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf35998e5e4999f0e with lease ID 0xd2f8290ec7ec9b37: Processing first storage report for DS-53d6e6bf-0157-47d8-97b1-62bf5b95cdd9 from datanode DatanodeRegistration(127.0.0.1:44679, datanodeUuid=f865cced-d508-4799-9a1a-385e66b7bf18, infoPort=41241, infoSecurePort=0, ipcPort=42927, storageInfo=lv=-57;cid=testClusterID;nsid=55833844;c=1732420114136) 2024-11-24T03:48:35,715 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf35998e5e4999f0e with lease ID 0xd2f8290ec7ec9b37: from storage DS-53d6e6bf-0157-47d8-97b1-62bf5b95cdd9 node DatanodeRegistration(127.0.0.1:44679, datanodeUuid=f865cced-d508-4799-9a1a-385e66b7bf18, infoPort=41241, infoSecurePort=0, ipcPort=42927, storageInfo=lv=-57;cid=testClusterID;nsid=55833844;c=1732420114136), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:35,799 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/data/data3/current/BP-1531438035-172.17.0.2-1732420114136/current, will proceed with Du for space computation calculation, 2024-11-24T03:48:35,799 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/data/data4/current/BP-1531438035-172.17.0.2-1732420114136/current, will proceed with Du for space computation calculation, 2024-11-24T03:48:35,817 WARN [Thread-1635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T03:48:35,819 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96496ff1a9c2ac89 with lease ID 0xd2f8290ec7ec9b38: Processing first storage report for DS-a151dac9-4fe2-47a9-8c78-b5517a37d178 from datanode DatanodeRegistration(127.0.0.1:33587, datanodeUuid=b6c6a8ed-e077-446f-a530-41c6896271fc, infoPort=43081, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=55833844;c=1732420114136) 2024-11-24T03:48:35,819 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96496ff1a9c2ac89 with lease ID 0xd2f8290ec7ec9b38: from storage DS-a151dac9-4fe2-47a9-8c78-b5517a37d178 node DatanodeRegistration(127.0.0.1:33587, datanodeUuid=b6c6a8ed-e077-446f-a530-41c6896271fc, infoPort=43081, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=55833844;c=1732420114136), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:35,819 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96496ff1a9c2ac89 with lease ID 0xd2f8290ec7ec9b38: Processing first storage report for DS-b5414a5c-cd89-4932-99d4-27df10ab29f6 from datanode DatanodeRegistration(127.0.0.1:33587, datanodeUuid=b6c6a8ed-e077-446f-a530-41c6896271fc, infoPort=43081, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=55833844;c=1732420114136) 2024-11-24T03:48:35,819 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96496ff1a9c2ac89 with lease ID 0xd2f8290ec7ec9b38: from storage DS-b5414a5c-cd89-4932-99d4-27df10ab29f6 node DatanodeRegistration(127.0.0.1:33587, datanodeUuid=b6c6a8ed-e077-446f-a530-41c6896271fc, infoPort=43081, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=55833844;c=1732420114136), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:48:35,862 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a 2024-11-24T03:48:35,866 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/zookeeper_0, clientPort=65090, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T03:48:35,867 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=65090 2024-11-24T03:48:35,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:48:35,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:48:35,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:48:35,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:48:35,888 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1 with version=8 2024-11-24T03:48:35,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/hbase-staging 2024-11-24T03:48:35,890 INFO [Time-limited test {}] client.ConnectionUtils(128): master/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:48:35,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:48:35,891 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:48:35,891 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:48:35,891 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:48:35,891 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:48:35,891 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T03:48:35,891 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:48:35,891 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46485 2024-11-24T03:48:35,893 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46485 connecting to ZooKeeper ensemble=127.0.0.1:65090 2024-11-24T03:48:35,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:464850x0, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null
2024-11-24T03:48:35,942 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46485-0x1016c3eb05e0000 connected
2024-11-24T03:48:36,007 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:48:36,009 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:48:36,011 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T03:48:36,011 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1, hbase.cluster.distributed=false
2024-11-24T03:48:36,013 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T03:48:36,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46485
2024-11-24T03:48:36,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46485
2024-11-24T03:48:36,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46485
2024-11-24T03:48:36,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46485
2024-11-24T03:48:36,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46485
2024-11-24T03:48:36,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-24T03:48:36,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-24T03:48:36,043 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/71d8d2d6408d:0 server-side Connection retries=45
2024-11-24T03:48:36,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T03:48:36,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-24T03:48:36,043 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-24T03:48:36,044 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T03:48:36,044 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-24T03:48:36,044 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-24T03:48:36,044 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-24T03:48:36,047 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45261
2024-11-24T03:48:36,049 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45261 connecting to ZooKeeper ensemble=127.0.0.1:65090
2024-11-24T03:48:36,050 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:48:36,052 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:48:36,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:452610x0, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-24T03:48:36,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452610x0, quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T03:48:36,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45261-0x1016c3eb05e0001 connected
2024-11-24T03:48:36,074 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-24T03:48:36,075 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-24T03:48:36,075 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-24T03:48:36,076 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45261-0x1016c3eb05e0001,
quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:48:36,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45261 2024-11-24T03:48:36,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45261 2024-11-24T03:48:36,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45261 2024-11-24T03:48:36,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45261 2024-11-24T03:48:36,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45261 2024-11-24T03:48:36,091 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;71d8d2d6408d:46485 2024-11-24T03:48:36,092 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/71d8d2d6408d,46485,1732420115890 2024-11-24T03:48:36,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:48:36,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:48:36,098 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/71d8d2d6408d,46485,1732420115890 2024-11-24T03:48:36,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T03:48:36,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,107 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T03:48:36,107 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/71d8d2d6408d,46485,1732420115890 from backup master directory 2024-11-24T03:48:36,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:48:36,115 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/71d8d2d6408d,46485,1732420115890 2024-11-24T03:48:36,115 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T03:48:36,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:48:36,115 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=71d8d2d6408d,46485,1732420115890 2024-11-24T03:48:36,120 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/hbase.id] with ID: 288b0fd9-8319-424e-8588-fad08c04e088 2024-11-24T03:48:36,120 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/.tmp/hbase.id 2024-11-24T03:48:36,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:48:36,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:48:36,126 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/.tmp/hbase.id]:[hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/hbase.id] 2024-11-24T03:48:36,139 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:48:36,139 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T03:48:36,141 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
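The records above show the new master publishing its cluster ID by writing hbase.id to a .tmp location first and then moving it to the final path. As a rough sketch of that write-then-rename pattern (plain Hadoop FileSystem API, placeholder paths and content, not HBase's actual FSUtils code):

// Illustrative sketch only: publish a small marker file on HDFS by writing it to a temporary
// location and then renaming it into place, mirroring the "Write the cluster ID file to a
// temporary location ... Move the temporary cluster ID file to its target location" sequence
// logged above. All paths and the UUID payload are placeholders.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration(); // fs.defaultFS would point at the test cluster
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/example"); // placeholder root dir
    Path tmpPath = new Path(rootDir, ".tmp/hbase.id");          // temporary location
    Path idPath  = new Path(rootDir, "hbase.id");               // final location

    String clusterId = UUID.randomUUID().toString();

    // 1. Write the content to the temporary file.
    try (FSDataOutputStream out = fs.create(tmpPath, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // 2. Rename it into place so readers only ever see a complete file.
    if (!fs.rename(tmpPath, idPath)) {
      throw new IOException("rename failed: " + tmpPath + " -> " + idPath);
    }
  }
}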
2024-11-24T03:48:36,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:48:36,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:48:36,155 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:48:36,155 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T03:48:36,156 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:48:36,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:48:36,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:48:36,164 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store 2024-11-24T03:48:36,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:48:36,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:48:36,171 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:36,171 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:48:36,171 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:48:36,172 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:48:36,172 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:48:36,172 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:48:36,172 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
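The 'master:store' descriptor dumped above spells out each column family's settings (VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING, BLOCKSIZE). As an illustrative sketch, assuming the standard HBase client builder API rather than the master-region bootstrap code itself, the 'info' family from that dump could be expressed like this:

// Sketch only: the 'info' family attributes from the descriptor above
// (VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true',
// DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8192) written with the
// public HBase client builders. The other families would follow the same pattern.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();

    // 'proc', 'rs' and 'state' would be added the same way with their own settings.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}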
2024-11-24T03:48:36,172 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420116171Disabling compacts and flushes for region at 1732420116171Disabling writes for close at 1732420116172 (+1 ms)Writing region close event to WAL at 1732420116172Closed at 1732420116172 2024-11-24T03:48:36,173 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/.initializing 2024-11-24T03:48:36,173 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/WALs/71d8d2d6408d,46485,1732420115890 2024-11-24T03:48:36,175 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C46485%2C1732420115890, suffix=, logDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/WALs/71d8d2d6408d,46485,1732420115890, archiveDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/oldWALs, maxLogs=10 2024-11-24T03:48:36,175 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C46485%2C1732420115890.1732420116175 2024-11-24T03:48:36,180 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/WALs/71d8d2d6408d,46485,1732420115890/71d8d2d6408d%2C46485%2C1732420115890.1732420116175 2024-11-24T03:48:36,183 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43081:43081),(127.0.0.1/127.0.0.1:41241:41241)] 2024-11-24T03:48:36,183 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:48:36,184 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:36,184 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,184 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T03:48:36,187 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:36,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:36,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T03:48:36,188 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:36,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:48:36,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T03:48:36,190 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:36,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:48:36,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T03:48:36,192 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:36,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:48:36,193 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,194 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,194 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,195 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,195 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,196 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T03:48:36,197 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:48:36,198 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:48:36,199 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876129, jitterRate=0.11405685544013977}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T03:48:36,199 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732420116184Initializing all the Stores at 1732420116185 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420116185Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420116185Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420116185Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420116185Cleaning up temporary data from old regions at 1732420116195 (+10 ms)Region opened successfully at 1732420116199 (+4 ms) 2024-11-24T03:48:36,200 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T03:48:36,203 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@da51dab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:48:36,204 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T03:48:36,204 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T03:48:36,204 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T03:48:36,204 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T03:48:36,205 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T03:48:36,205 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T03:48:36,205 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T03:48:36,208 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T03:48:36,209 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T03:48:36,240 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T03:48:36,241 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T03:48:36,242 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T03:48:36,248 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T03:48:36,249 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T03:48:36,250 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T03:48:36,256 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T03:48:36,258 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T03:48:36,265 DEBUG 
[master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T03:48:36,267 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T03:48:36,273 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T03:48:36,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:48:36,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:48:36,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,282 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=71d8d2d6408d,46485,1732420115890, sessionid=0x1016c3eb05e0000, setting cluster-up flag (Was=false) 2024-11-24T03:48:36,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,323 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T03:48:36,324 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,46485,1732420115890 2024-11-24T03:48:36,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,365 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T03:48:36,367 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,46485,1732420115890 2024-11-24T03:48:36,369 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T03:48:36,371 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T03:48:36,372 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T03:48:36,372 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T03:48:36,372 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 71d8d2d6408d,46485,1732420115890 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T03:48:36,374 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:48:36,374 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:48:36,374 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:48:36,374 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:48:36,374 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/71d8d2d6408d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T03:48:36,374 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,374 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:48:36,374 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T03:48:36,375 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732420146375 2024-11-24T03:48:36,375 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T03:48:36,375 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T03:48:36,375 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T03:48:36,375 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T03:48:36,375 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T03:48:36,375 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T03:48:36,375 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,376 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T03:48:36,376 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:48:36,376 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T03:48:36,376 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T03:48:36,376 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T03:48:36,376 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T03:48:36,376 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T03:48:36,377 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420116377,5,FailOnTimeoutGroup] 2024-11-24T03:48:36,377 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420116377,5,FailOnTimeoutGroup] 2024-11-24T03:48:36,377 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,377 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T03:48:36,377 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,377 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,377 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:36,377 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T03:48:36,380 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(746): ClusterId : 288b0fd9-8319-424e-8588-fad08c04e088 2024-11-24T03:48:36,380 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T03:48:36,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:48:36,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:48:36,391 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T03:48:36,391 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T03:48:36,391 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T03:48:36,391 INFO [PEWorker-1 {}] 
regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1 2024-11-24T03:48:36,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:48:36,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:48:36,398 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:36,399 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T03:48:36,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:48:36,399 DEBUG [RS:0;71d8d2d6408d:45261 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9b83bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:48:36,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:48:36,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:36,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:36,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:48:36,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:48:36,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:36,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:36,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:48:36,405 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:48:36,405 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:36,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:36,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:48:36,407 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:48:36,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:36,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:36,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:48:36,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740 2024-11-24T03:48:36,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740 2024-11-24T03:48:36,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:48:36,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:48:36,410 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
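
The CompactionConfiguration entries above record the effective compaction settings for every column family of hbase:meta (minCompactSize 128 MB, between 3 and 10 files per minor compaction, ratio 1.2, off-peak ratio 5.0, weekly major compactions with 50% jitter). Purely as a hedged illustration, the Java sketch below sets the stock hbase.hstore.* / hbase.hregion.* keys that normally back those numbers; the helper class is invented and the values simply mirror the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Invented helper; the keys are the standard compaction properties, values mirror the log above.
    public class CompactionTuningSketch {
        public static Configuration compactionDefaults() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
            conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
            conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period (1 week)
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
            return conf;
        }
    }
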
2024-11-24T03:48:36,411 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:48:36,413 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:48:36,413 DEBUG [RS:0;71d8d2d6408d:45261 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;71d8d2d6408d:45261 2024-11-24T03:48:36,413 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T03:48:36,413 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T03:48:36,413 DEBUG [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T03:48:36,413 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866734, jitterRate=0.10210961103439331}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:48:36,414 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(2659): reportForDuty to master=71d8d2d6408d,46485,1732420115890 with port=45261, startcode=1732420116043 2024-11-24T03:48:36,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732420116398Initializing all the Stores at 1732420116399 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420116399Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420116399Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420116399Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420116399Cleaning up temporary data from old regions at 1732420116410 (+11 ms)Region opened successfully at 1732420116414 (+4 ms) 2024-11-24T03:48:36,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:48:36,414 DEBUG [RS:0;71d8d2d6408d:45261 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=RegionServerStatusService, sasl=false 2024-11-24T03:48:36,414 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:48:36,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:48:36,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:48:36,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:48:36,415 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:48:36,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420116414Disabling compacts and flushes for region at 1732420116414Disabling writes for close at 1732420116414Writing region close event to WAL at 1732420116414Closed at 1732420116414 2024-11-24T03:48:36,416 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:48:36,416 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T03:48:36,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T03:48:36,416 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33505, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T03:48:36,416 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46485 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:36,417 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46485 {}] master.ServerManager(517): Registering regionserver=71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:36,417 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:48:36,418 DEBUG [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1 2024-11-24T03:48:36,418 DEBUG [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46531 2024-11-24T03:48:36,418 DEBUG [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T03:48:36,418 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T03:48:36,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:48:36,423 DEBUG 
[RS:0;71d8d2d6408d:45261 {}] zookeeper.ZKUtil(111): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:36,424 WARN [RS:0;71d8d2d6408d:45261 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T03:48:36,424 INFO [RS:0;71d8d2d6408d:45261 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:48:36,424 DEBUG [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:36,424 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [71d8d2d6408d,45261,1732420116043] 2024-11-24T03:48:36,427 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T03:48:36,435 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T03:48:36,436 INFO [RS:0;71d8d2d6408d:45261 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T03:48:36,436 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,436 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T03:48:36,437 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T03:48:36,437 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
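
The MemStoreFlusher and PressureAwareCompactionThroughputController entries above report an 880 M global memstore limit with an 836 M low-water mark and a 50–100 MB/second compaction throughput band. A minimal sketch of the configuration keys believed to drive these values follows; the global limits are heap fractions, so the absolute megabyte figures depend on the test JVM's heap, and the helper class is invented.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Invented helper; keys are believed to be the standard memstore/throughput properties.
    public class RegionServerMemoryTuningSketch {
        public static Configuration memstoreAndThroughputDefaults() {
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);              // fraction of heap for all memstores
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f); // low-water mark as a fraction of the limit
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s upper bound
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s lower bound
            return conf;
        }
    }
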
2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:48:36,437 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:48:36,438 DEBUG [RS:0;71d8d2d6408d:45261 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:48:36,438 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,438 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,438 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,438 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
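
The ChoreService entries above show the region server scheduling its periodic maintenance tasks (CompactionChecker every second, MemstoreFlusherChore, ExecutorStatusChore, nonceCleaner and so on). As a rough illustration of that mechanism only, the sketch below wires up a trivial custom chore; HeartbeatChore and its one-second period are invented, while ScheduledChore and ChoreService are the HBase classes doing the scheduling in the log.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        // Invented chore; runs once per second, mirroring how CompactionChecker is scheduled.
        static class HeartbeatChore extends ScheduledChore {
            HeartbeatChore(Stoppable stopper) {
                super("HeartbeatChore", stopper, 1000); // name, stopper, period in milliseconds
            }
            @Override
            protected void chore() {
                System.out.println("chore tick");
            }
        }

        public static void schedule(ChoreService service, Stoppable stopper) {
            service.scheduleChore(new HeartbeatChore(stopper));
        }
    }
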
2024-11-24T03:48:36,438 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,438 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,45261,1732420116043-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:48:36,453 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T03:48:36,453 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,45261,1732420116043-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,454 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,454 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.Replication(171): 71d8d2d6408d,45261,1732420116043 started 2024-11-24T03:48:36,467 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:36,467 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(1482): Serving as 71d8d2d6408d,45261,1732420116043, RpcServer on 71d8d2d6408d/172.17.0.2:45261, sessionid=0x1016c3eb05e0001 2024-11-24T03:48:36,467 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T03:48:36,468 DEBUG [RS:0;71d8d2d6408d:45261 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:36,468 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,45261,1732420116043' 2024-11-24T03:48:36,468 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T03:48:36,468 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T03:48:36,468 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T03:48:36,469 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T03:48:36,469 DEBUG [RS:0;71d8d2d6408d:45261 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:36,469 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,45261,1732420116043' 2024-11-24T03:48:36,469 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T03:48:36,469 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T03:48:36,469 DEBUG [RS:0;71d8d2d6408d:45261 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T03:48:36,469 INFO [RS:0;71d8d2d6408d:45261 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T03:48:36,469 INFO [RS:0;71d8d2d6408d:45261 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-24T03:48:36,569 WARN [71d8d2d6408d:46485 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T03:48:36,571 INFO [RS:0;71d8d2d6408d:45261 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C45261%2C1732420116043, suffix=, logDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043, archiveDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/oldWALs, maxLogs=32 2024-11-24T03:48:36,572 INFO [RS:0;71d8d2d6408d:45261 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C45261%2C1732420116043.1732420116572 2024-11-24T03:48:36,578 INFO [RS:0;71d8d2d6408d:45261 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420116572 2024-11-24T03:48:36,580 DEBUG [RS:0;71d8d2d6408d:45261 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41241:41241),(127.0.0.1/127.0.0.1:43081:43081)] 2024-11-24T03:48:36,819 DEBUG [71d8d2d6408d:46485 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T03:48:36,819 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:36,821 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,45261,1732420116043, state=OPENING 2024-11-24T03:48:36,831 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T03:48:36,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:48:36,841 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:48:36,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:48:36,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:48:36,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,45261,1732420116043}] 2024-11-24T03:48:36,995 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T03:48:36,997 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40153, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T03:48:37,002 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T03:48:37,002 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:48:37,005 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C45261%2C1732420116043.meta, suffix=.meta, logDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043, archiveDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/oldWALs, maxLogs=32 2024-11-24T03:48:37,006 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C45261%2C1732420116043.meta.1732420117005.meta 2024-11-24T03:48:37,012 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.meta.1732420117005.meta 2024-11-24T03:48:37,013 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41241:41241),(127.0.0.1/127.0.0.1:43081:43081)] 2024-11-24T03:48:37,014 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:48:37,015 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T03:48:37,015 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T03:48:37,015 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
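
The AbstractFSWAL entries above describe the WAL geometry for both the region server WAL and the meta WAL: 256 MB block size, 128 MB roll size and at most 32 live logs. A hedged sketch of the keys that typically produce those values is below; the roll size is not set directly but derived from the block size times a multiplier, and the helper class is invented.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Invented helper; keys are the WAL sizing properties believed to produce the logged values.
    public class WalTuningSketch {
        public static Configuration walDefaults() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size (256 MB)
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at blocksize * 0.5 = 128 MB
            conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs before forced flushes
            return conf;
        }
    }
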
2024-11-24T03:48:37,015 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T03:48:37,015 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:37,015 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T03:48:37,015 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T03:48:37,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:48:37,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:48:37,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:37,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:37,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:48:37,019 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:48:37,020 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:37,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:37,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:48:37,021 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:48:37,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:37,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:48:37,021 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:37,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:37,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:48:37,022 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:48:37,022 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:37,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:48:37,023 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:48:37,024 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740 2024-11-24T03:48:37,026 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740 2024-11-24T03:48:37,028 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:48:37,028 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:48:37,028 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
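
The FlushLargeStoresPolicy message repeated above notes that hbase:meta does not set hbase.hregion.percolumnfamilyflush.size.lower.bound in its descriptor, so the policy falls back to the memstore flush size divided by the number of families (16.0 M here). For a user table the bound can instead be pinned in the table descriptor; a minimal sketch, with an invented table name and an arbitrary 16 MB value, is:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushPolicySketch {
        public static TableDescriptor withExplicitFlushBound() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("ExampleTable"))             // invented table name
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                // Only families whose memstore exceeds this bound get flushed individually (16 MB, arbitrary).
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
        }
    }
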
2024-11-24T03:48:37,030 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:48:37,031 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730905, jitterRate=-0.07060645520687103}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:48:37,031 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T03:48:37,032 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732420117016Writing region info on filesystem at 1732420117016Initializing all the Stores at 1732420117016Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420117017 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420117017Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420117017Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420117017Cleaning up temporary data from old regions at 1732420117028 (+11 ms)Running coprocessor post-open hooks at 1732420117031 (+3 ms)Region opened successfully at 1732420117032 (+1 ms) 2024-11-24T03:48:37,033 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732420116995 2024-11-24T03:48:37,035 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T03:48:37,035 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T03:48:37,036 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:37,037 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,45261,1732420116043, state=OPEN 2024-11-24T03:48:37,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:48:37,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:48:37,109 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:37,109 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:48:37,109 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:48:37,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T03:48:37,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,45261,1732420116043 in 268 msec 2024-11-24T03:48:37,117 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T03:48:37,118 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 697 msec 2024-11-24T03:48:37,119 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:48:37,119 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T03:48:37,120 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:48:37,120 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,45261,1732420116043, seqNum=-1] 2024-11-24T03:48:37,121 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:48:37,122 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51453, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:48:37,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 757 msec 2024-11-24T03:48:37,129 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732420117129, completionTime=-1 2024-11-24T03:48:37,129 INFO 
[master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T03:48:37,129 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T03:48:37,131 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T03:48:37,131 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732420177131 2024-11-24T03:48:37,131 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732420237131 2024-11-24T03:48:37,131 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T03:48:37,131 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,46485,1732420115890-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:37,131 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,46485,1732420115890-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:37,131 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,46485,1732420115890-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:37,132 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-71d8d2d6408d:46485, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:37,132 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:37,132 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:37,134 DEBUG [master/71d8d2d6408d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T03:48:37,135 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.020sec 2024-11-24T03:48:37,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T03:48:37,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T03:48:37,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T03:48:37,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
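
Once the master logs "Master has completed initialization", the cluster is ready for clients: the assignment manager has joined the cluster and the balancer, normalizer and janitor chores are running. As a rough, hedged sketch of how a client can confirm that state through the public Admin API (class name invented, connection settings assumed to come from the same test configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
        public static void printLiveServers() throws Exception {
            Configuration conf = HBaseConfiguration.create(); // picks up the cluster's hbase-site.xml
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                System.out.println("active master: " + metrics.getMasterName());
                System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
            }
        }
    }
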
2024-11-24T03:48:37,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T03:48:37,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,46485,1732420115890-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:48:37,136 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,46485,1732420115890-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T03:48:37,138 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T03:48:37,138 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T03:48:37,138 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,46485,1732420115890-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:48:37,180 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52063a7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:48:37,180 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 71d8d2d6408d,46485,-1 for getting cluster id 2024-11-24T03:48:37,181 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T03:48:37,182 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '288b0fd9-8319-424e-8588-fad08c04e088' 2024-11-24T03:48:37,183 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T03:48:37,183 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "288b0fd9-8319-424e-8588-fad08c04e088" 2024-11-24T03:48:37,183 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25704676, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:48:37,184 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [71d8d2d6408d,46485,-1] 2024-11-24T03:48:37,184 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T03:48:37,184 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:48:37,186 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34532, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T03:48:37,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fea105f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:48:37,188 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:48:37,189 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,45261,1732420116043, seqNum=-1] 2024-11-24T03:48:37,190 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:48:37,191 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56212, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:48:37,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=71d8d2d6408d,46485,1732420115890 2024-11-24T03:48:37,193 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:48:37,196 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T03:48:37,197 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T03:48:37,198 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 71d8d2d6408d,46485,1732420115890 2024-11-24T03:48:37,198 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1657e7f6 2024-11-24T03:48:37,198 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T03:48:37,199 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34546, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T03:48:37,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T03:48:37,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
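
The two TableDescriptorChecker warnings are expected here: the test deliberately shrinks the region max file size (786432 bytes) and the memstore flush size (8192 bytes) so that flushes and log rolls happen quickly. A hedged sketch of how such values would be set on a test configuration follows; in production these keys stay at their much larger defaults, and the helper class is invented.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Invented helper mirroring the tiny limits the warnings above refer to.
    public class TinyRegionTestConfigSketch {
        public static Configuration forFastRolling() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.max.filesize", 768 * 1024);      // 786432 bytes, forces early splits
            conf.setLong("hbase.hregion.memstore.flush.size", 8 * 1024); // 8192 bytes, forces frequent flushes
            conf.setBoolean("hbase.table.sanity.checks", false);         // the checker that logged the warnings reads this key
            return conf;
        }
    }
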
2024-11-24T03:48:37,200 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:48:37,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T03:48:37,203 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T03:48:37,203 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:37,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-24T03:48:37,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T03:48:37,205 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T03:48:37,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741835_1011 (size=405) 2024-11-24T03:48:37,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741835_1011 (size=405) 2024-11-24T03:48:37,218 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 33c9d87ba47052a3c145f4311f8986e1, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1 2024-11-24T03:48:37,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741836_1012 (size=88) 2024-11-24T03:48:37,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33587 is added to blk_1073741836_1012 (size=88) 2024-11-24T03:48:37,230 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:37,230 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 33c9d87ba47052a3c145f4311f8986e1, disabling compactions & flushes 2024-11-24T03:48:37,230 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:48:37,230 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:48:37,230 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. after waiting 0 ms 2024-11-24T03:48:37,230 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:48:37,230 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:48:37,230 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 33c9d87ba47052a3c145f4311f8986e1: Waiting for close lock at 1732420117230Disabling compacts and flushes for region at 1732420117230Disabling writes for close at 1732420117230Writing region close event to WAL at 1732420117230Closed at 1732420117230 2024-11-24T03:48:37,232 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T03:48:37,232 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732420117232"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732420117232"}]},"ts":"1732420117232"} 2024-11-24T03:48:37,235 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
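The CreateTableProcedure above (pid=4) is driven by the table descriptor printed at 03:48:37,200. For reference, a descriptor with the same column-family settings could be built through the public client API roughly as follows; this is a sketch against the standard Admin/TableDescriptorBuilder classes, not the test's own code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTestTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1)    // VERSIONS => '1'
                        .setBlocksize(65536)  // BLOCKSIZE => 64 KB
                        .build())
                    .build();
                admin.createTable(td);        // issues the create request seen at HMaster above
            }
        }
    }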
2024-11-24T03:48:37,236 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T03:48:37,236 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732420117236"}]},"ts":"1732420117236"} 2024-11-24T03:48:37,238 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-24T03:48:37,239 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=33c9d87ba47052a3c145f4311f8986e1, ASSIGN}] 2024-11-24T03:48:37,240 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=33c9d87ba47052a3c145f4311f8986e1, ASSIGN 2024-11-24T03:48:37,241 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=33c9d87ba47052a3c145f4311f8986e1, ASSIGN; state=OFFLINE, location=71d8d2d6408d,45261,1732420116043; forceNewPlan=false, retain=false 2024-11-24T03:48:37,392 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=33c9d87ba47052a3c145f4311f8986e1, regionState=OPENING, regionLocation=71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:37,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=33c9d87ba47052a3c145f4311f8986e1, ASSIGN because future has completed 2024-11-24T03:48:37,395 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 33c9d87ba47052a3c145f4311f8986e1, server=71d8d2d6408d,45261,1732420116043}] 2024-11-24T03:48:37,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T03:48:37,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T03:48:37,407 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:48:37,407 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 
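Once the region is assigned and opened below, the rest of this section is dominated by repeated WARN entries from RecoverLeaseFSUtils (starting at 03:48:38): a Close-WAL-Writer thread keeps probing, via reflection, whether an old WAL file on the earlier minicluster (port 44445) is closed, and every probe fails with "Filesystem closed" because the DFSClient behind that FileSystem instance has already been shut down, so the probe repeats about once per second. A minimal sketch of the underlying HDFS calls, assuming the public DistributedFileSystem API and a purely illustrative file path:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class WalLeaseProbe {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            DistributedFileSystem dfs =
                (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:44445"), conf);
            Path wal = new Path("/user/jenkins/test-data/example-wal"); // hypothetical WAL path
            dfs.recoverLease(wal);            // ask the NameNode to start lease recovery
            while (!dfs.isFileClosed(wal)) {  // the call the log invokes reflectively
                Thread.sleep(1000L);          // ~1 s between probes, matching the log cadence
            }
            // If the filesystem (or its DFSClient) has already been closed, isFileClosed
            // throws IOException("Filesystem closed"), which is the cause in the traces below.
        }
    }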
2024-11-24T03:48:37,551 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:48:37,552 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 33c9d87ba47052a3c145f4311f8986e1, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:48:37,552 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,552 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:48:37,552 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,552 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,553 INFO [StoreOpener-33c9d87ba47052a3c145f4311f8986e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,555 INFO [StoreOpener-33c9d87ba47052a3c145f4311f8986e1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33c9d87ba47052a3c145f4311f8986e1 columnFamilyName info 2024-11-24T03:48:37,555 DEBUG [StoreOpener-33c9d87ba47052a3c145f4311f8986e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:48:37,555 INFO [StoreOpener-33c9d87ba47052a3c145f4311f8986e1-1 {}] regionserver.HStore(327): Store=33c9d87ba47052a3c145f4311f8986e1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:48:37,556 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 
33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,556 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,557 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,557 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,557 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,559 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,562 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:48:37,562 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 33c9d87ba47052a3c145f4311f8986e1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732981, jitterRate=-0.06796741485595703}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T03:48:37,562 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:48:37,563 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 33c9d87ba47052a3c145f4311f8986e1: Running coprocessor pre-open hook at 1732420117552Writing region info on filesystem at 1732420117552Initializing all the Stores at 1732420117553 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420117553Cleaning up temporary data from old regions at 1732420117557 (+4 ms)Running coprocessor post-open hooks at 1732420117562 (+5 ms)Region opened successfully at 1732420117563 (+1 ms) 2024-11-24T03:48:37,565 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1., pid=6, masterSystemTime=1732420117548 2024-11-24T03:48:37,568 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:48:37,568 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:48:37,569 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=33c9d87ba47052a3c145f4311f8986e1, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,45261,1732420116043 2024-11-24T03:48:37,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 33c9d87ba47052a3c145f4311f8986e1, server=71d8d2d6408d,45261,1732420116043 because future has completed 2024-11-24T03:48:37,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T03:48:37,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 33c9d87ba47052a3c145f4311f8986e1, server=71d8d2d6408d,45261,1732420116043 in 179 msec 2024-11-24T03:48:37,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T03:48:37,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=33c9d87ba47052a3c145f4311f8986e1, ASSIGN in 339 msec 2024-11-24T03:48:37,582 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T03:48:37,582 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732420117582"}]},"ts":"1732420117582"} 2024-11-24T03:48:37,585 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-24T03:48:37,586 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T03:48:37,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 386 msec 2024-11-24T03:48:38,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: 
null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:38,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:39,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:39,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:40,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:40,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:41,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:41,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:42,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:42,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:42,518 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T03:48:42,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,547 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:48:42,552 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T03:48:42,553 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-24T03:48:43,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:43,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:44,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:44,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T03:48:45,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T03:48:45,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:46,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:46,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:47,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:47,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
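The repeated WARN above comes from the WAL close path: RecoverLeaseFSUtils reaches DistributedFileSystem#isFileClosed through reflection (hence the GeneratedMethodAccessor frames), and the call keeps failing because the DFSClient backing this mini-cluster has already been shut down ("Filesystem closed"). Below is a minimal sketch of that kind of reflective probe; the class name IsFileClosedProbe and the error handling are illustrative assumptions, not the HBase implementation.

```java
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  /**
   * Returns true if the filesystem reports the file as closed; returns false
   * if the method is missing or the call fails, for example with
   * "java.io.IOException: Filesystem closed" as in the warnings above.
   */
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed is looked up reflectively, so there is no compile-time
      // dependency on DistributedFileSystem; this mirrors the reflective
      // invocation visible in the stack trace above.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (ReflectiveOperationException e) {
      // InvocationTargetException wraps the real cause (here: Filesystem closed).
      return false;
    }
  }
}
```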
2024-11-24T03:48:47,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-24T03:48:47,292 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-24T03:48:47,292 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, for max=2147483647 with caching=100
2024-11-24T03:48:47,295 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T03:48:47,295 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.
2024-11-24T03:48:47,299 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1., hostname=71d8d2d6408d,45261,1732420116043, seqNum=2]
2024-11-24T03:48:47,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T03:48:47,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T03:48:47,314 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-24T03:48:47,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-24T03:48:47,315 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-24T03:48:47,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-24T03:48:47,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T03:48:47,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-24T03:48:47,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45261 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-24T03:48:47,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.
2024-11-24T03:48:47,480 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 33c9d87ba47052a3c145f4311f8986e1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-24T03:48:47,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/61001eea8c494d0590095919d72f6c2e is 1080, key is row0001/info:/1732420127300/Put/seqid=0
2024-11-24T03:48:47,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741837_1013 (size=6033)
2024-11-24T03:48:47,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741837_1013 (size=6033)
2024-11-24T03:48:47,518 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/61001eea8c494d0590095919d72f6c2e
2024-11-24T03:48:47,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/61001eea8c494d0590095919d72f6c2e as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/61001eea8c494d0590095919d72f6c2e
2024-11-24T03:48:47,542 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/61001eea8c494d0590095919d72f6c2e, entries=1, sequenceid=5, filesize=5.9 K
2024-11-24T03:48:47,543 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 33c9d87ba47052a3c145f4311f8986e1 in 63ms, sequenceid=5, compaction requested=false
2024-11-24T03:48:47,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 33c9d87ba47052a3c145f4311f8986e1:
2024-11-24T03:48:47,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.
2024-11-24T03:48:47,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-24T03:48:47,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-24T03:48:47,555 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-24T03:48:47,555 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 233 msec
2024-11-24T03:48:47,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 248 msec
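The flush above is driven from the client side ("Client=jenkins//172.17.0.2 flush ...") and executed on the master as a FlushTableProcedure (pid=7) with one FlushRegionProcedure child (pid=8). A minimal client-side sketch of the kind of call that kicks this off is below, assuming the standard HBase Admin API; the class name FlushTableExample and the connection boilerplate are illustrative, not taken from the test source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of the whole table; on this cluster the master runs it
      // as a FlushTableProcedure with a FlushRegionProcedure per region, which
      // is what produces the pid=7/pid=8 entries logged above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}
```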
2024-11-24T03:48:48,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:48,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:49,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:49,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:50,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:50,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:51,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:51,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:52,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:52,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:53,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:53,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:54,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:54,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:55,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:55,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:56,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
2024-11-24T03:48:56,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:57,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206
2024-11-24T03:48:57,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
11 more
2024-11-24T03:48:57,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-24T03:48:57,342 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-24T03:48:57,345 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T03:48:57,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-24T03:48:57,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-24T03:48:57,348 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-24T03:48:57,349 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-24T03:48:57,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-24T03:48:57,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45261 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-24T03:48:57,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.
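For orientation: the records above show the master persisting a FlushTableProcedure (pid=9) and dispatching a FlushRegionProcedure (pid=10) to the region server. A minimal, hedged sketch of how a client triggers that kind of flush through the standard HBase Admin API follows; this is not the test's own code, and the configuration is assumed to point at the cluster under test.

// Minimal sketch (assumptions: standard HBase client API, cluster reachable from
// the default HBaseConfiguration). Triggers a table flush like the one whose
// procedure records appear above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits the flush procedure on the master and waits for it,
      // which is what the client-side "Operation: FLUSH ... completed" record reports.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}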
2024-11-24T03:48:57,503 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 33c9d87ba47052a3c145f4311f8986e1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-24T03:48:57,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/8874f466c1b8444d8e5c9bdbfac591ad is 1080, key is row0002/info:/1732420137343/Put/seqid=0
2024-11-24T03:48:57,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741838_1014 (size=6033)
2024-11-24T03:48:57,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741838_1014 (size=6033)
2024-11-24T03:48:57,513 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/8874f466c1b8444d8e5c9bdbfac591ad
2024-11-24T03:48:57,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/8874f466c1b8444d8e5c9bdbfac591ad as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/8874f466c1b8444d8e5c9bdbfac591ad
2024-11-24T03:48:57,532 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/8874f466c1b8444d8e5c9bdbfac591ad, entries=1, sequenceid=9, filesize=5.9 K
2024-11-24T03:48:57,533 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 33c9d87ba47052a3c145f4311f8986e1 in 30ms, sequenceid=9, compaction requested=false
2024-11-24T03:48:57,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 33c9d87ba47052a3c145f4311f8986e1:
2024-11-24T03:48:57,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.
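The flush above moved a single ~1 KB cell (key row0002/info:) from the memstore into a 5.9 K store file at sequenceid=9. A hedged sketch of the kind of client write that produces such a cell is below; the table and column family names come from the log, while the empty qualifier and 1 KB payload are illustrative assumptions, not the test's actual data.

// Sketch only (assumptions: empty qualifier, 1 KB payload). Writes one cell into
// the "info" family, mirroring the "row0002/info:/<ts>/Put" key reported by
// HFileWriterImpl above; a later Admin.flush would persist it as a small HFile.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSingleCellExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name)) {
      Put put = new Put(Bytes.toBytes("row0002"));
      // Column family "info", empty qualifier, ~1 KB value (illustrative).
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]);
      table.put(put);
    }
  }
}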
2024-11-24T03:48:57,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-24T03:48:57,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-24T03:48:57,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-24T03:48:57,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-24T03:48:57,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 194 msec 2024-11-24T03:48:58,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:48:58,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:59,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:48:59,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:00,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:00,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:00,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 after 68063ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
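The repeated WARNs in this stretch come from RecoverLeaseFSUtils retrying WAL lease recovery against a DFSClient that has already been closed, so every reflective call to recoverLease/isFileClosed fails with "Filesystem closed". Below is a hedged sketch of the underlying HDFS pattern the utility drives (recoverLease once, then poll isFileClosed); the timeout and poll interval are illustrative values, not HBase's configured ones.

// Sketch of HDFS lease recovery as used for WAL files (assumptions: 60 s timeout,
// 1 s poll interval). Both DistributedFileSystem.recoverLease and isFileClosed
// throw IOException("Filesystem closed") once the underlying DFSClient is closed,
// which is the failure logged above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static boolean recoverLease(Configuration conf, Path walFile)
      throws IOException, InterruptedException {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on a non-HDFS filesystem
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + 60_000L; // illustrative timeout
    // recoverLease starts recovery and returns true if the file is already closed;
    // otherwise poll isFileClosed until the NameNode reports it closed.
    boolean recovered = dfs.recoverLease(walFile);
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1_000L); // illustrative poll interval
      recovered = dfs.isFileClosed(walFile);
    }
    return recovered;
  }
}

In the run above the retry can never succeed because the DFSClient behind the WAL's filesystem is already closed, which is why the same WARN repeats about once per second in the records that follow.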
2024-11-24T03:49:00,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta after 68054ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T03:49:01,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:01,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:02,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:02,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:03,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:03,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:04,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:04,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:05,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:05,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:05,862 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T03:49:06,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:06,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:07,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:07,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:07,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-24T03:49:07,446 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T03:49:07,450 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C45261%2C1732420116043.1732420147450 2024-11-24T03:49:07,455 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:07,455 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:07,456 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:07,456 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:07,456 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:07,456 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420116572 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420147450 2024-11-24T03:49:07,457 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43081:43081),(127.0.0.1/127.0.0.1:41241:41241)] 2024-11-24T03:49:07,457 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420116572 is not closed yet, will try archiving it next time 2024-11-24T03:49:07,457 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T03:49:07,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741833_1009 (size=5546) 2024-11-24T03:49:07,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T03:49:07,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741833_1009 (size=5546) 2024-11-24T03:49:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T03:49:07,460 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T03:49:07,461 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T03:49:07,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T03:49:07,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45261 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-24T03:49:07,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:49:07,614 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 33c9d87ba47052a3c145f4311f8986e1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T03:49:07,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/1496e530c55e49d8b6cdcdd38331684e is 1080, key is row0003/info:/1732420147448/Put/seqid=0 2024-11-24T03:49:07,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741840_1016 (size=6033) 2024-11-24T03:49:07,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741840_1016 (size=6033) 2024-11-24T03:49:08,025 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/1496e530c55e49d8b6cdcdd38331684e 2024-11-24T03:49:08,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/1496e530c55e49d8b6cdcdd38331684e as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/1496e530c55e49d8b6cdcdd38331684e 2024-11-24T03:49:08,039 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/1496e530c55e49d8b6cdcdd38331684e, entries=1, sequenceid=13, filesize=5.9 K 2024-11-24T03:49:08,040 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 33c9d87ba47052a3c145f4311f8986e1 in 426ms, sequenceid=13, compaction requested=true 2024-11-24T03:49:08,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 33c9d87ba47052a3c145f4311f8986e1: 2024-11-24T03:49:08,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:49:08,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-24T03:49:08,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-24T03:49:08,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:08,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:08,046 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-24T03:49:08,046 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 581 msec 2024-11-24T03:49:08,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 590 msec 2024-11-24T03:49:09,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:09,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:10,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:10,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:11,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:11,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:12,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:12,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:13,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:13,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:14,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:14,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:15,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:15,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:16,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:16,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:17,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:17,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:17,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T03:49:17,492 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T03:49:17,492 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:49:17,494 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:49:17,494 DEBUG [Time-limited test {}] regionserver.HStore(1541): 33c9d87ba47052a3c145f4311f8986e1/info is initiating minor compaction (all files) 2024-11-24T03:49:17,494 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T03:49:17,494 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:17,494 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 33c9d87ba47052a3c145f4311f8986e1/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 
2024-11-24T03:49:17,494 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/61001eea8c494d0590095919d72f6c2e, hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/8874f466c1b8444d8e5c9bdbfac591ad, hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/1496e530c55e49d8b6cdcdd38331684e] into tmpdir=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp, totalSize=17.7 K 2024-11-24T03:49:17,495 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 61001eea8c494d0590095919d72f6c2e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732420127300 2024-11-24T03:49:17,495 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 8874f466c1b8444d8e5c9bdbfac591ad, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732420137343 2024-11-24T03:49:17,496 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1496e530c55e49d8b6cdcdd38331684e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732420147448 2024-11-24T03:49:17,509 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 33c9d87ba47052a3c145f4311f8986e1#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:49:17,510 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/d2806f3e20ef4f3badeb605500732e7f is 1080, key is row0001/info:/1732420127300/Put/seqid=0 2024-11-24T03:49:17,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741841_1017 (size=8296) 2024-11-24T03:49:17,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741841_1017 (size=8296) 2024-11-24T03:49:17,522 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/d2806f3e20ef4f3badeb605500732e7f as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/d2806f3e20ef4f3badeb605500732e7f 2024-11-24T03:49:17,530 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 33c9d87ba47052a3c145f4311f8986e1/info of 33c9d87ba47052a3c145f4311f8986e1 into d2806f3e20ef4f3badeb605500732e7f(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:49:17,530 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 33c9d87ba47052a3c145f4311f8986e1: 2024-11-24T03:49:17,532 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C45261%2C1732420116043.1732420157532 2024-11-24T03:49:17,539 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:17,539 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:17,539 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:17,539 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:17,539 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:17,540 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420147450 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420157532 2024-11-24T03:49:17,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741839_1015 (size=2520) 2024-11-24T03:49:17,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741839_1015 (size=2520) 2024-11-24T03:49:17,543 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420116572 to 
hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/oldWALs/71d8d2d6408d%2C45261%2C1732420116043.1732420116572 2024-11-24T03:49:17,543 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43081:43081),(127.0.0.1/127.0.0.1:41241:41241)] 2024-11-24T03:49:17,544 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T03:49:17,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T03:49:17,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T03:49:17,546 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T03:49:17,548 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T03:49:17,548 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T03:49:17,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45261 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-24T03:49:17,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 
2024-11-24T03:49:17,701 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 33c9d87ba47052a3c145f4311f8986e1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T03:49:17,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/5f3b5f092a2a44bcb6d9c682e2390a3c is 1080, key is row0000/info:/1732420157531/Put/seqid=0 2024-11-24T03:49:17,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741843_1019 (size=6033) 2024-11-24T03:49:17,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741843_1019 (size=6033) 2024-11-24T03:49:17,714 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/5f3b5f092a2a44bcb6d9c682e2390a3c 2024-11-24T03:49:17,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/5f3b5f092a2a44bcb6d9c682e2390a3c as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/5f3b5f092a2a44bcb6d9c682e2390a3c 2024-11-24T03:49:17,726 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/5f3b5f092a2a44bcb6d9c682e2390a3c, entries=1, sequenceid=18, filesize=5.9 K 2024-11-24T03:49:17,727 INFO [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 33c9d87ba47052a3c145f4311f8986e1 in 26ms, sequenceid=18, compaction requested=false 2024-11-24T03:49:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 33c9d87ba47052a3c145f4311f8986e1: 2024-11-24T03:49:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 
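Aside: the flush above writes the new store file under the region's .tmp directory and then commits it by moving it into the info family directory, so readers never observe a half-written file. A minimal local-filesystem sketch of that write-then-atomic-rename pattern follows; HBase performs the equivalent on HDFS via HRegionFileSystem, and the directories and file name below are only illustrative.

    import java.io.IOException;
    import java.nio.file.*;

    // Minimal sketch of the "write to .tmp, then commit by rename" pattern seen in
    // the flush above. Local-filesystem stand-in for what HRegionFileSystem does on
    // HDFS; all paths here are hypothetical.
    public class TmpCommitSketch {
        public static void main(String[] args) throws IOException {
            Path regionDir = Files.createTempDirectory("region");
            Path tmpDir = Files.createDirectories(regionDir.resolve(".tmp"));
            Path storeDir = Files.createDirectories(regionDir.resolve("info"));

            // 1. write the new store file under .tmp so readers never see a partial file
            Path tmpFile = tmpDir.resolve("5f3b5f092a2a44bcb6d9c682e2390a3c");
            Files.write(tmpFile, "flushed cells".getBytes());

            // 2. commit: move it into the store directory in one atomic step
            Path committed = storeDir.resolve(tmpFile.getFileName());
            Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

            System.out.println("committed " + committed + " size=" + Files.size(committed));
        }
    }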
2024-11-24T03:49:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-24T03:49:17,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-24T03:49:17,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-24T03:49:17,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-11-24T03:49:17,735 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-11-24T03:49:17,770 INFO [master/71d8d2d6408d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T03:49:17,770 INFO [master/71d8d2d6408d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T03:49:18,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:18,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:19,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:19,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:20,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:20,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:21,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:21,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:22,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:22,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:22,552 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 33c9d87ba47052a3c145f4311f8986e1, had cached 0 bytes from a total of 14329 2024-11-24T03:49:23,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:23,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:24,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:24,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:25,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:25,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:26,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:26,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:27,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:27,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:27,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46485 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T03:49:27,601 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T03:49:27,604 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C45261%2C1732420116043.1732420167604 2024-11-24T03:49:27,610 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,610 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,610 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,610 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,610 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,610 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420157532 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420167604 2024-11-24T03:49:27,611 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43081:43081),(127.0.0.1/127.0.0.1:41241:41241)] 2024-11-24T03:49:27,611 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420157532 is not closed yet, will try archiving it next time 2024-11-24T03:49:27,611 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420147450 to hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/oldWALs/71d8d2d6408d%2C45261%2C1732420116043.1732420147450 2024-11-24T03:49:27,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T03:49:27,611 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
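Aside: the long run of "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above is a close-time lease-recovery loop re-checking roughly once per second whether the old WAL file is closed, and retrying even when the check itself throws. A standalone sketch of that poll-and-retry shape follows; it is not RecoverLeaseFSUtils, the check is a hypothetical callback, and the timeout is an assumed value.

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    // Standalone sketch of a "retry the isFileClosed check about once per second"
    // loop, in the spirit of the warnings above. Not HBase's RecoverLeaseFSUtils;
    // the check is an arbitrary callback and the timings are illustrative.
    public class RecoverPollSketch {
        static boolean waitUntilClosed(BooleanSupplier isClosed, long timeoutMs) throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (System.currentTimeMillis() < deadline) {
                try {
                    if (isClosed.getAsBoolean()) {
                        return true;            // file is closed, lease recovered
                    }
                } catch (RuntimeException e) {
                    // the real code logs "Failed invocation for <path>" and keeps retrying
                    System.err.println("check failed, will retry: " + e.getMessage());
                }
                TimeUnit.SECONDS.sleep(1);      // matches the ~1s spacing of the warnings
            }
            return false;                       // give up after the timeout
        }

        public static void main(String[] args) throws InterruptedException {
            // simulate a check that fails twice and then reports the file as closed
            int[] calls = {0};
            boolean closed = waitUntilClosed(() -> {
                if (calls[0]++ < 2) throw new RuntimeException("Filesystem closed");
                return true;
            }, 10_000);
            System.out.println("closed=" + closed);
        }
    }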
2024-11-24T03:49:27,611 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:49:27,611 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:49:27,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:49:27,612 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T03:49:27,612 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T03:49:27,612 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2035120877, stopped=false 2024-11-24T03:49:27,612 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=71d8d2d6408d,46485,1732420115890 2024-11-24T03:49:27,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741842_1018 (size=2026) 2024-11-24T03:49:27,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741842_1018 (size=2026) 2024-11-24T03:49:27,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:49:27,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:49:27,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:27,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:27,658 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:49:27,658 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T03:49:27,658 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:49:27,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:49:27,658 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '71d8d2d6408d,45261,1732420116043' ***** 2024-11-24T03:49:27,658 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T03:49:27,659 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(3091): Received CLOSE for 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(959): stopping server 71d8d2d6408d,45261,1732420116043 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;71d8d2d6408d:45261. 2024-11-24T03:49:27,659 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:49:27,659 DEBUG [RS:0;71d8d2d6408d:45261 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:49:27,659 DEBUG [RS:0;71d8d2d6408d:45261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:49:27,659 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 33c9d87ba47052a3c145f4311f8986e1, disabling compactions & flushes 2024-11-24T03:49:27,659 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 
2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T03:49:27,659 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T03:49:27,659 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. after waiting 0 ms 2024-11-24T03:49:27,659 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:49:27,659 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T03:49:27,660 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 33c9d87ba47052a3c145f4311f8986e1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T03:49:27,660 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T03:49:27,660 DEBUG [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 33c9d87ba47052a3c145f4311f8986e1=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.} 2024-11-24T03:49:27,660 DEBUG [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 33c9d87ba47052a3c145f4311f8986e1 2024-11-24T03:49:27,660 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:49:27,660 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:49:27,660 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:49:27,660 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:49:27,660 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:49:27,660 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:49:27,660 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-24T03:49:27,669 DEBUG 
[RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/9ed5cd73574240e390d2d93a8a84ac4e is 1080, key is row0001/info:/1732420167602/Put/seqid=0 2024-11-24T03:49:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741845_1021 (size=6033) 2024-11-24T03:49:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741845_1021 (size=6033) 2024-11-24T03:49:27,674 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/9ed5cd73574240e390d2d93a8a84ac4e 2024-11-24T03:49:27,679 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/.tmp/info/7554c0a87e2f452297037f83c4e37d54 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1./info:regioninfo/1732420117569/Put/seqid=0 2024-11-24T03:49:27,681 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/.tmp/info/9ed5cd73574240e390d2d93a8a84ac4e as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/9ed5cd73574240e390d2d93a8a84ac4e 2024-11-24T03:49:27,689 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/9ed5cd73574240e390d2d93a8a84ac4e, entries=1, sequenceid=22, filesize=5.9 K 2024-11-24T03:49:27,691 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 33c9d87ba47052a3c145f4311f8986e1 in 31ms, sequenceid=22, compaction requested=true 2024-11-24T03:49:27,695 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/61001eea8c494d0590095919d72f6c2e, 
hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/8874f466c1b8444d8e5c9bdbfac591ad, hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/1496e530c55e49d8b6cdcdd38331684e] to archive 2024-11-24T03:49:27,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741846_1022 (size=7308) 2024-11-24T03:49:27,696 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T03:49:27,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741846_1022 (size=7308) 2024-11-24T03:49:27,696 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/.tmp/info/7554c0a87e2f452297037f83c4e37d54 2024-11-24T03:49:27,697 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/61001eea8c494d0590095919d72f6c2e to hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/61001eea8c494d0590095919d72f6c2e 2024-11-24T03:49:27,698 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/8874f466c1b8444d8e5c9bdbfac591ad to hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/8874f466c1b8444d8e5c9bdbfac591ad 2024-11-24T03:49:27,700 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/1496e530c55e49d8b6cdcdd38331684e to hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/info/1496e530c55e49d8b6cdcdd38331684e 2024-11-24T03:49:27,700 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=71d8d2d6408d:46485 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T03:49:27,701 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [61001eea8c494d0590095919d72f6c2e=6033, 8874f466c1b8444d8e5c9bdbfac591ad=6033, 1496e530c55e49d8b6cdcdd38331684e=6033] 2024-11-24T03:49:27,704 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/33c9d87ba47052a3c145f4311f8986e1/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-24T03:49:27,705 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 2024-11-24T03:49:27,705 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 33c9d87ba47052a3c145f4311f8986e1: Waiting for close lock at 1732420167659Running coprocessor pre-close hooks at 1732420167659Disabling compacts and flushes for region at 1732420167659Disabling writes for close at 1732420167659Obtaining lock to block concurrent updates at 1732420167660 (+1 ms)Preparing flush snapshotting stores in 33c9d87ba47052a3c145f4311f8986e1 at 1732420167660Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732420167660Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. at 1732420167665 (+5 ms)Flushing 33c9d87ba47052a3c145f4311f8986e1/info: creating writer at 1732420167666 (+1 ms)Flushing 33c9d87ba47052a3c145f4311f8986e1/info: appending metadata at 1732420167668 (+2 ms)Flushing 33c9d87ba47052a3c145f4311f8986e1/info: closing flushed file at 1732420167668Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6039d4a5: reopening flushed file at 1732420167680 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 33c9d87ba47052a3c145f4311f8986e1 in 31ms, sequenceid=22, compaction requested=true at 1732420167691 (+11 ms)Writing region close event to WAL at 1732420167701 (+10 ms)Running coprocessor post-close hooks at 1732420167705 (+4 ms)Closed at 1732420167705 2024-11-24T03:49:27,705 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732420117200.33c9d87ba47052a3c145f4311f8986e1. 
2024-11-24T03:49:27,725 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/.tmp/ns/4f3190b5d9534019bc3c9d0befcfe847 is 43, key is default/ns:d/1732420117123/Put/seqid=0 2024-11-24T03:49:27,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741847_1023 (size=5153) 2024-11-24T03:49:27,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741847_1023 (size=5153) 2024-11-24T03:49:27,730 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/.tmp/ns/4f3190b5d9534019bc3c9d0befcfe847 2024-11-24T03:49:27,750 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/.tmp/table/07c9ba1310b242d1a581c6208e6eaa02 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732420117582/Put/seqid=0 2024-11-24T03:49:27,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741848_1024 (size=5508) 2024-11-24T03:49:27,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741848_1024 (size=5508) 2024-11-24T03:49:27,755 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/.tmp/table/07c9ba1310b242d1a581c6208e6eaa02 2024-11-24T03:49:27,761 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/.tmp/info/7554c0a87e2f452297037f83c4e37d54 as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/info/7554c0a87e2f452297037f83c4e37d54 2024-11-24T03:49:27,766 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/info/7554c0a87e2f452297037f83c4e37d54, entries=10, sequenceid=11, filesize=7.1 K 2024-11-24T03:49:27,767 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/.tmp/ns/4f3190b5d9534019bc3c9d0befcfe847 as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/ns/4f3190b5d9534019bc3c9d0befcfe847 2024-11-24T03:49:27,773 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/ns/4f3190b5d9534019bc3c9d0befcfe847, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T03:49:27,774 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/.tmp/table/07c9ba1310b242d1a581c6208e6eaa02 as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/table/07c9ba1310b242d1a581c6208e6eaa02 2024-11-24T03:49:27,779 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/table/07c9ba1310b242d1a581c6208e6eaa02, entries=2, sequenceid=11, filesize=5.4 K 2024-11-24T03:49:27,780 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 120ms, sequenceid=11, compaction requested=false 2024-11-24T03:49:27,787 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T03:49:27,787 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:49:27,787 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:49:27,788 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420167660Running coprocessor pre-close hooks at 1732420167660Disabling compacts and flushes for region at 1732420167660Disabling writes for close at 1732420167660Obtaining lock to block concurrent updates at 1732420167661 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732420167661Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732420167661Flushing stores of hbase:meta,,1.1588230740 at 1732420167661Flushing 1588230740/info: creating writer at 1732420167662 (+1 ms)Flushing 1588230740/info: appending metadata at 1732420167678 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732420167678Flushing 1588230740/ns: creating writer at 1732420167702 (+24 ms)Flushing 1588230740/ns: appending metadata at 1732420167724 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1732420167724Flushing 1588230740/table: creating writer at 1732420167736 (+12 ms)Flushing 1588230740/table: appending metadata at 1732420167750 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732420167750Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f68dc26: reopening flushed file at 1732420167760 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4dea5664: reopening flushed file at 1732420167766 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1135c0cb: reopening flushed file at 1732420167773 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 120ms, sequenceid=11, compaction requested=false at 1732420167780 (+7 ms)Writing region close event to WAL at 1732420167783 (+3 ms)Running coprocessor post-close hooks at 1732420167787 (+4 ms)Closed at 1732420167787 2024-11-24T03:49:27,788 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T03:49:27,860 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(976): stopping server 71d8d2d6408d,45261,1732420116043; all regions closed. 2024-11-24T03:49:27,861 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,861 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,861 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,861 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741834_1010 (size=3306) 2024-11-24T03:49:27,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741834_1010 (size=3306) 2024-11-24T03:49:27,869 DEBUG [RS:0;71d8d2d6408d:45261 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/oldWALs 2024-11-24T03:49:27,869 INFO [RS:0;71d8d2d6408d:45261 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C45261%2C1732420116043.meta:.meta(num 1732420117005) 2024-11-24T03:49:27,870 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,870 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,870 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,870 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,870 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:27,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741844_1020 (size=1252) 2024-11-24T03:49:27,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741844_1020 (size=1252) 2024-11-24T03:49:28,013 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/WALs/71d8d2d6408d,45261,1732420116043/71d8d2d6408d%2C45261%2C1732420116043.1732420157532 to hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/oldWALs/71d8d2d6408d%2C45261%2C1732420116043.1732420157532 2024-11-24T03:49:28,016 DEBUG [RS:0;71d8d2d6408d:45261 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/oldWALs 2024-11-24T03:49:28,016 INFO [RS:0;71d8d2d6408d:45261 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C45261%2C1732420116043:(num 1732420167604) 2024-11-24T03:49:28,016 DEBUG [RS:0;71d8d2d6408d:45261 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:49:28,016 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:49:28,016 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:49:28,016 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.ChoreService(370): Chore service for: regionserver/71d8d2d6408d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T03:49:28,016 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:49:28,016 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T03:49:28,016 INFO [RS:0;71d8d2d6408d:45261 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45261 2024-11-24T03:49:28,045 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:49:28,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:49:28,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/71d8d2d6408d,45261,1732420116043 2024-11-24T03:49:28,045 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [71d8d2d6408d,45261,1732420116043] 2024-11-24T03:49:28,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:28,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:28,061 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/71d8d2d6408d,45261,1732420116043 already deleted, retry=false 2024-11-24T03:49:28,061 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 71d8d2d6408d,45261,1732420116043 expired; onlineServers=0 2024-11-24T03:49:28,061 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '71d8d2d6408d,46485,1732420115890' ***** 2024-11-24T03:49:28,061 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T03:49:28,061 INFO [M:0;71d8d2d6408d:46485 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:49:28,061 INFO [M:0;71d8d2d6408d:46485 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:49:28,062 DEBUG [M:0;71d8d2d6408d:46485 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T03:49:28,062 DEBUG [M:0;71d8d2d6408d:46485 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T03:49:28,062 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-24T03:49:28,062 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420116377 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420116377,5,FailOnTimeoutGroup] 2024-11-24T03:49:28,062 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420116377 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420116377,5,FailOnTimeoutGroup] 2024-11-24T03:49:28,062 INFO [M:0;71d8d2d6408d:46485 {}] hbase.ChoreService(370): Chore service for: master/71d8d2d6408d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T03:49:28,062 INFO [M:0;71d8d2d6408d:46485 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:49:28,062 DEBUG [M:0;71d8d2d6408d:46485 {}] master.HMaster(1795): Stopping service threads 2024-11-24T03:49:28,062 INFO [M:0;71d8d2d6408d:46485 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T03:49:28,062 INFO [M:0;71d8d2d6408d:46485 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:49:28,062 INFO [M:0;71d8d2d6408d:46485 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T03:49:28,062 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-24T03:49:28,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T03:49:28,070 DEBUG [M:0;71d8d2d6408d:46485 {}] zookeeper.ZKUtil(347): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T03:49:28,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:28,070 WARN [M:0;71d8d2d6408d:46485 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T03:49:28,070 INFO [M:0;71d8d2d6408d:46485 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/.lastflushedseqids 2024-11-24T03:49:28,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741849_1025 (size=130) 2024-11-24T03:49:28,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741849_1025 (size=130) 2024-11-24T03:49:28,076 INFO [M:0;71d8d2d6408d:46485 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T03:49:28,076 INFO [M:0;71d8d2d6408d:46485 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T03:49:28,076 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:49:28,076 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:49:28,077 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:49:28,077 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:49:28,077 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T03:49:28,077 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.61 KB heapSize=55.02 KB 2024-11-24T03:49:28,093 DEBUG [M:0;71d8d2d6408d:46485 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9709f8240b424511859b77173a46719a is 82, key is hbase:meta,,1/info:regioninfo/1732420117036/Put/seqid=0 2024-11-24T03:49:28,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741850_1026 (size=5672) 2024-11-24T03:49:28,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741850_1026 (size=5672) 2024-11-24T03:49:28,098 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9709f8240b424511859b77173a46719a 2024-11-24T03:49:28,120 DEBUG [M:0;71d8d2d6408d:46485 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fdbb3755ed0540e5b49d94fcbf54e3cd is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732420117588/Put/seqid=0 2024-11-24T03:49:28,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741851_1027 (size=7825) 2024-11-24T03:49:28,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741851_1027 (size=7825) 2024-11-24T03:49:28,125 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.01 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fdbb3755ed0540e5b49d94fcbf54e3cd 2024-11-24T03:49:28,131 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fdbb3755ed0540e5b49d94fcbf54e3cd 2024-11-24T03:49:28,152 DEBUG [M:0;71d8d2d6408d:46485 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7d893d11c46e471a8b7a1be43d2783a0 is 69, key is 71d8d2d6408d,45261,1732420116043/rs:state/1732420116417/Put/seqid=0 2024-11-24T03:49:28,153 INFO [RS:0;71d8d2d6408d:45261 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:49:28,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:49:28,153 INFO [RS:0;71d8d2d6408d:45261 {}] regionserver.HRegionServer(1031): Exiting; stopping=71d8d2d6408d,45261,1732420116043; zookeeper connection closed. 
2024-11-24T03:49:28,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45261-0x1016c3eb05e0001, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:49:28,154 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@61f57479 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@61f57479 2024-11-24T03:49:28,154 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T03:49:28,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741852_1028 (size=5156) 2024-11-24T03:49:28,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741852_1028 (size=5156) 2024-11-24T03:49:28,157 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7d893d11c46e471a8b7a1be43d2783a0 2024-11-24T03:49:28,179 DEBUG [M:0;71d8d2d6408d:46485 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fabb7aa43e5e4996a37d495de479d484 is 52, key is load_balancer_on/state:d/1732420117195/Put/seqid=0 2024-11-24T03:49:28,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741853_1029 (size=5056) 2024-11-24T03:49:28,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741853_1029 (size=5056) 2024-11-24T03:49:28,185 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fabb7aa43e5e4996a37d495de479d484 2024-11-24T03:49:28,192 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9709f8240b424511859b77173a46719a as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9709f8240b424511859b77173a46719a 2024-11-24T03:49:28,198 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9709f8240b424511859b77173a46719a, entries=8, sequenceid=121, filesize=5.5 K 2024-11-24T03:49:28,199 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fdbb3755ed0540e5b49d94fcbf54e3cd as 
hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fdbb3755ed0540e5b49d94fcbf54e3cd 2024-11-24T03:49:28,204 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fdbb3755ed0540e5b49d94fcbf54e3cd 2024-11-24T03:49:28,204 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fdbb3755ed0540e5b49d94fcbf54e3cd, entries=14, sequenceid=121, filesize=7.6 K 2024-11-24T03:49:28,205 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7d893d11c46e471a8b7a1be43d2783a0 as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7d893d11c46e471a8b7a1be43d2783a0 2024-11-24T03:49:28,210 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7d893d11c46e471a8b7a1be43d2783a0, entries=1, sequenceid=121, filesize=5.0 K 2024-11-24T03:49:28,211 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fabb7aa43e5e4996a37d495de479d484 as hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fabb7aa43e5e4996a37d495de479d484 2024-11-24T03:49:28,216 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46531/user/jenkins/test-data/f9703012-070e-ab1c-0b97-486f1abe6ff1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fabb7aa43e5e4996a37d495de479d484, entries=1, sequenceid=121, filesize=4.9 K 2024-11-24T03:49:28,217 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=121, compaction requested=false 2024-11-24T03:49:28,218 INFO [M:0;71d8d2d6408d:46485 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:49:28,218 DEBUG [M:0;71d8d2d6408d:46485 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420168076Disabling compacts and flushes for region at 1732420168076Disabling writes for close at 1732420168077 (+1 ms)Obtaining lock to block concurrent updates at 1732420168077Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732420168077Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44659, getHeapSize=56272, getOffHeapSize=0, getCellsCount=140 at 1732420168077Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732420168078 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732420168078Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732420168093 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732420168093Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732420168105 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732420168120 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732420168120Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732420168131 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732420168151 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732420168151Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732420168163 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732420168179 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732420168179Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bca2360: reopening flushed file at 1732420168191 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d91805f: reopening flushed file at 1732420168198 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b33127: reopening flushed file at 1732420168204 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48cef1bb: reopening flushed file at 1732420168210 (+6 ms)Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=121, compaction requested=false at 1732420168217 (+7 ms)Writing region close event to WAL at 1732420168218 (+1 ms)Closed at 1732420168218 2024-11-24T03:49:28,219 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:28,219 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:28,219 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:28,219 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:28,219 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:49:28,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44679 is added to blk_1073741830_1006 (size=53056) 2024-11-24T03:49:28,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33587 is added to blk_1073741830_1006 (size=53056) 2024-11-24T03:49:28,221 INFO [M:0;71d8d2d6408d:46485 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T03:49:28,221 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T03:49:28,221 INFO [M:0;71d8d2d6408d:46485 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46485 2024-11-24T03:49:28,222 INFO [M:0;71d8d2d6408d:46485 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:49:28,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:49:28,345 INFO [M:0;71d8d2d6408d:46485 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:49:28,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46485-0x1016c3eb05e0000, quorum=127.0.0.1:65090, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:49:28,356 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5559c5a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:49:28,356 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25901f18{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:49:28,356 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:49:28,356 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50c8efd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:49:28,356 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c78c403{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.log.dir/,STOPPED} 2024-11-24T03:49:28,358 WARN [BP-1531438035-172.17.0.2-1732420114136 heartbeating to localhost/127.0.0.1:46531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:49:28,358 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:49:28,359 WARN [BP-1531438035-172.17.0.2-1732420114136 heartbeating to localhost/127.0.0.1:46531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1531438035-172.17.0.2-1732420114136 (Datanode Uuid b6c6a8ed-e077-446f-a530-41c6896271fc) service to localhost/127.0.0.1:46531 2024-11-24T03:49:28,359 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:49:28,359 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/data/data3/current/BP-1531438035-172.17.0.2-1732420114136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:49:28,359 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/data/data4/current/BP-1531438035-172.17.0.2-1732420114136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:49:28,360 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:49:28,371 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58200445{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:49:28,372 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@403776ad{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:49:28,372 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:49:28,372 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e17225c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:49:28,372 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@724d8870{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.log.dir/,STOPPED} 2024-11-24T03:49:28,374 WARN [BP-1531438035-172.17.0.2-1732420114136 heartbeating to localhost/127.0.0.1:46531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:49:28,374 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:49:28,374 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:49:28,374 WARN [BP-1531438035-172.17.0.2-1732420114136 heartbeating to localhost/127.0.0.1:46531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1531438035-172.17.0.2-1732420114136 (Datanode Uuid f865cced-d508-4799-9a1a-385e66b7bf18) service to localhost/127.0.0.1:46531 2024-11-24T03:49:28,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/data/data1/current/BP-1531438035-172.17.0.2-1732420114136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:49:28,375 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/cluster_33b1a009-e793-0723-44c1-292fd1be5d13/data/data2/current/BP-1531438035-172.17.0.2-1732420114136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:49:28,375 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:49:28,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38b36302{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:49:28,382 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40cc7ce8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:49:28,382 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:49:28,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@675e37f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:49:28,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@153be6d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.log.dir/,STOPPED} 2024-11-24T03:49:28,388 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T03:49:28,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T03:49:28,419 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 182) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46531 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46531 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:46531 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:46531 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/71d8d2d6408d:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46531 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:46531 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:46531 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46531 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=222 (was 195) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6612 (was 6874) 2024-11-24T03:49:28,428 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=222, ProcessCount=11, AvailableMemoryMB=6612 2024-11-24T03:49:28,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T03:49:28,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.log.dir so I do NOT create it in target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d 2024-11-24T03:49:28,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d38f8d1-b087-f32e-c2b0-b78ecb74568a/hadoop.tmp.dir so I do NOT create it in target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d 2024-11-24T03:49:28,428 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3, deleteOnExit=true 2024-11-24T03:49:28,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T03:49:28,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/test.cache.data in system properties and HBase conf 2024-11-24T03:49:28,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T03:49:28,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.log.dir in system properties and HBase conf 2024-11-24T03:49:28,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T03:49:28,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T03:49:28,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T03:49:28,429 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T03:49:28,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/nfs.dump.dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/java.io.tmpdir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T03:49:28,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T03:49:28,441 INFO [regionserver/71d8d2d6408d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:49:28,444 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:49:28,748 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:49:28,752 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:49:28,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:49:28,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:49:28,753 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:49:28,754 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:49:28,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79a27881{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:49:28,755 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c4ff7f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:49:28,867 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32fa876c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/java.io.tmpdir/jetty-localhost-32829-hadoop-hdfs-3_4_1-tests_jar-_-any-14924088461055249966/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:49:28,867 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33d970b6{HTTP/1.1, (http/1.1)}{localhost:32829} 2024-11-24T03:49:28,867 INFO [Time-limited test {}] server.Server(415): Started @248603ms 2024-11-24T03:49:28,880 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:49:29,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:29,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:29,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:49:29,082 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:49:29,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:49:29,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:49:29,083 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:49:29,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1cb72b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:49:29,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fd0bcc5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:49:29,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b8ac8f8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/java.io.tmpdir/jetty-localhost-43717-hadoop-hdfs-3_4_1-tests_jar-_-any-10674563957431755728/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:49:29,197 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@457f5010{HTTP/1.1, (http/1.1)}{localhost:43717} 2024-11-24T03:49:29,197 INFO [Time-limited test {}] server.Server(415): Started @248932ms 2024-11-24T03:49:29,198 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:49:29,230 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:49:29,233 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:49:29,234 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:49:29,234 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:49:29,234 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:49:29,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@604cd81b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:49:29,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7dc3fcc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:49:29,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ddec4b9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/java.io.tmpdir/jetty-localhost-34245-hadoop-hdfs-3_4_1-tests_jar-_-any-2189524322465352838/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:49:29,341 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@637ef10b{HTTP/1.1, (http/1.1)}{localhost:34245} 2024-11-24T03:49:29,341 INFO [Time-limited test {}] server.Server(415): Started @249076ms 2024-11-24T03:49:29,342 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:49:29,882 WARN [Thread-1964 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/data/data1/current/BP-2086317584-172.17.0.2-1732420168448/current, will proceed with Du for space computation calculation, 2024-11-24T03:49:29,882 WARN [Thread-1965 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/data/data2/current/BP-2086317584-172.17.0.2-1732420168448/current, will proceed with Du for space computation calculation, 2024-11-24T03:49:29,897 WARN [Thread-1928 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T03:49:29,899 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ae9e338c19ef4c8 with lease ID 0x4572fd7ad77cb063: Processing first storage report for DS-c229466f-d622-496b-a18f-2b8a127fb5da from datanode DatanodeRegistration(127.0.0.1:33007, datanodeUuid=1fac988b-a195-4679-93e0-0d1e690751ad, infoPort=38639, infoSecurePort=0, ipcPort=40581, storageInfo=lv=-57;cid=testClusterID;nsid=1710067925;c=1732420168448) 2024-11-24T03:49:29,900 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ae9e338c19ef4c8 with lease ID 0x4572fd7ad77cb063: from storage DS-c229466f-d622-496b-a18f-2b8a127fb5da node DatanodeRegistration(127.0.0.1:33007, datanodeUuid=1fac988b-a195-4679-93e0-0d1e690751ad, infoPort=38639, infoSecurePort=0, ipcPort=40581, storageInfo=lv=-57;cid=testClusterID;nsid=1710067925;c=1732420168448), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:49:29,900 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ae9e338c19ef4c8 with lease ID 0x4572fd7ad77cb063: Processing first storage report for DS-7abe78d3-38e1-4724-af3c-7c4bce04fa68 from datanode DatanodeRegistration(127.0.0.1:33007, datanodeUuid=1fac988b-a195-4679-93e0-0d1e690751ad, infoPort=38639, infoSecurePort=0, ipcPort=40581, storageInfo=lv=-57;cid=testClusterID;nsid=1710067925;c=1732420168448) 2024-11-24T03:49:29,900 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ae9e338c19ef4c8 with lease ID 0x4572fd7ad77cb063: from storage DS-7abe78d3-38e1-4724-af3c-7c4bce04fa68 node DatanodeRegistration(127.0.0.1:33007, datanodeUuid=1fac988b-a195-4679-93e0-0d1e690751ad, infoPort=38639, infoSecurePort=0, ipcPort=40581, storageInfo=lv=-57;cid=testClusterID;nsid=1710067925;c=1732420168448), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:49:30,050 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/data/data3/current/BP-2086317584-172.17.0.2-1732420168448/current, will proceed with Du for space computation calculation, 2024-11-24T03:49:30,050 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/data/data4/current/BP-2086317584-172.17.0.2-1732420168448/current, will proceed with Du for space computation calculation, 2024-11-24T03:49:30,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:30,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:30,066 WARN [Thread-1951 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T03:49:30,068 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xccb4febfb327791b with lease ID 0x4572fd7ad77cb064: Processing first storage report for DS-58ea43b8-3339-42ed-b3e5-ce0731bf2038 from datanode DatanodeRegistration(127.0.0.1:42655, datanodeUuid=eb6d2498-093a-4d4f-ab8e-cba0767cb1b0, infoPort=37621, infoSecurePort=0, ipcPort=43883, storageInfo=lv=-57;cid=testClusterID;nsid=1710067925;c=1732420168448) 2024-11-24T03:49:30,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccb4febfb327791b with lease ID 0x4572fd7ad77cb064: from storage DS-58ea43b8-3339-42ed-b3e5-ce0731bf2038 node DatanodeRegistration(127.0.0.1:42655, datanodeUuid=eb6d2498-093a-4d4f-ab8e-cba0767cb1b0, infoPort=37621, infoSecurePort=0, ipcPort=43883, storageInfo=lv=-57;cid=testClusterID;nsid=1710067925;c=1732420168448), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T03:49:30,068 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xccb4febfb327791b with lease ID 0x4572fd7ad77cb064: Processing first storage report for DS-9b6673a0-c8f6-464e-a9b2-2ac529ac911d from datanode DatanodeRegistration(127.0.0.1:42655, datanodeUuid=eb6d2498-093a-4d4f-ab8e-cba0767cb1b0, infoPort=37621, infoSecurePort=0, ipcPort=43883, storageInfo=lv=-57;cid=testClusterID;nsid=1710067925;c=1732420168448) 2024-11-24T03:49:30,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccb4febfb327791b with lease ID 0x4572fd7ad77cb064: from storage DS-9b6673a0-c8f6-464e-a9b2-2ac529ac911d node DatanodeRegistration(127.0.0.1:42655, datanodeUuid=eb6d2498-093a-4d4f-ab8e-cba0767cb1b0, infoPort=37621, infoSecurePort=0, ipcPort=43883, storageInfo=lv=-57;cid=testClusterID;nsid=1710067925;c=1732420168448), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:49:30,073 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d 2024-11-24T03:49:30,076 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/zookeeper_0, clientPort=56614, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/zookeeper_0/version-2, dataDirSize=457 
dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T03:49:30,076 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56614 2024-11-24T03:49:30,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:49:30,078 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:49:30,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:49:30,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:49:30,086 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e with version=8 2024-11-24T03:49:30,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/hbase-staging 2024-11-24T03:49:30,088 INFO [Time-limited test {}] client.ConnectionUtils(128): master/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:49:30,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:49:30,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:49:30,088 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:49:30,088 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:49:30,089 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:49:30,089 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T03:49:30,089 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:49:30,089 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35951 2024-11-24T03:49:30,091 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=master:35951 connecting to ZooKeeper ensemble=127.0.0.1:56614 2024-11-24T03:49:30,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:359510x0, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:49:30,141 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35951-0x1016c3f841f0000 connected 2024-11-24T03:49:30,204 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:49:30,205 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:49:30,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:49:30,207 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e, hbase.cluster.distributed=false 2024-11-24T03:49:30,208 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:49:30,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35951 2024-11-24T03:49:30,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35951 2024-11-24T03:49:30,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35951 2024-11-24T03:49:30,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35951 2024-11-24T03:49:30,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35951 2024-11-24T03:49:30,225 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:49:30,225 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:49:30,225 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:49:30,225 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:49:30,225 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:49:30,225 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, 
maxQueueLength=30, handlerCount=1 2024-11-24T03:49:30,225 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T03:49:30,226 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:49:30,226 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46657 2024-11-24T03:49:30,228 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46657 connecting to ZooKeeper ensemble=127.0.0.1:56614 2024-11-24T03:49:30,228 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:49:30,230 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:49:30,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:466570x0, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:49:30,237 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:466570x0, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:49:30,237 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46657-0x1016c3f841f0001 connected 2024-11-24T03:49:30,238 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T03:49:30,238 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T03:49:30,239 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T03:49:30,240 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:49:30,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46657 2024-11-24T03:49:30,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46657 2024-11-24T03:49:30,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46657 2024-11-24T03:49:30,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46657 2024-11-24T03:49:30,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46657 2024-11-24T03:49:30,252 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;71d8d2d6408d:35951 2024-11-24T03:49:30,253 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup 
master ZNode /hbase/backup-masters/71d8d2d6408d,35951,1732420170088 2024-11-24T03:49:30,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:49:30,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:49:30,263 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/71d8d2d6408d,35951,1732420170088 2024-11-24T03:49:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T03:49:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,271 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T03:49:30,271 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/71d8d2d6408d,35951,1732420170088 from backup master directory 2024-11-24T03:49:30,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:49:30,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/71d8d2d6408d,35951,1732420170088 2024-11-24T03:49:30,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:49:30,279 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
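The watcher traffic above (NodeCreated on /hbase/master, NodeChildrenChanged and NodeDeleted under /hbase/backup-masters) follows the usual ZooKeeper pattern of registering a watch before the znode exists. A minimal sketch of that pattern with the stock org.apache.zookeeper client, not HBase's internal ZKWatcher/ZKUtil; the connect string and znode path are taken from the log, everything else is illustrative:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch created = new CountDownLatch(1);
        // Connect string comes from the log above; the session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56614", 30_000, event -> {
            // Session-level events (SyncConnected etc.) arrive here with a null path.
        });
        // exists() registers the watch even though /hbase/master does not exist yet,
        // which is what "Set watcher on znode that does not yet exist" records above.
        zk.exists("/hbase/master", event -> {
            if (event.getType() == Watcher.Event.EventType.NodeCreated) {
                created.countDown(); // fires once the active master creates /hbase/master
            }
        });
        created.await();
        zk.close();
    }
}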
2024-11-24T03:49:30,279 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=71d8d2d6408d,35951,1732420170088
2024-11-24T03:49:30,283 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/hbase.id] with ID: dd6256f7-9b20-4668-9596-d185cc89ebfb
2024-11-24T03:49:30,283 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/.tmp/hbase.id
2024-11-24T03:49:30,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741826_1002 (size=42)
2024-11-24T03:49:30,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741826_1002 (size=42)
2024-11-24T03:49:30,295 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/.tmp/hbase.id]:[hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/hbase.id]
2024-11-24T03:49:30,307 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T03:49:30,307 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-24T03:49:30,308 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
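The FSUtils entries above write the new cluster ID to .tmp/hbase.id and only then move it onto hbase.id, so a reader never observes a half-written file. A rough sketch of that write-then-rename pattern against the public Hadoop FileSystem API; the paths and NameNode port mirror the log, while the helper name and error handling are invented for illustration:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
    // Write the content to a temporary path first, then rename it into place.
    static void writeThenRename(FileSystem fs, Path tmp, Path target, String content) throws IOException {
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, target)) {
            throw new IOException("rename " + tmp + " -> " + target + " failed");
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:39181"); // NameNode address taken from the log
        FileSystem fs = FileSystem.get(conf);
        Path root = new Path("/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e");
        writeThenRename(fs, new Path(root, ".tmp/hbase.id"), new Path(root, "hbase.id"),
            UUID.randomUUID().toString()); // the real run recorded ID dd6256f7-9b20-4668-9596-d185cc89ebfb
    }
}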
2024-11-24T03:49:30,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:49:30,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:49:30,351 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:49:30,351 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T03:49:30,352 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:49:30,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:49:30,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:49:30,360 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store 2024-11-24T03:49:30,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:49:30,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:49:30,369 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:49:30,369 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:49:30,369 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:49:30,369 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:49:30,369 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:49:30,369 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:49:30,369 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
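The master:store descriptor printed above (an in-memory 'info' family with ROW_INDEX_V1 encoding, a ROWCOL bloom filter and 8 KB blocks, plus plain 'proc', 'rs' and 'state' families) is built internally by the master, but the same shape can be sketched with HBase's public descriptor builders. This is an illustrative approximation, not the code the master runs:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreLikeDescriptorSketch {
    static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_store"))
            // Mirrors the logged 'info' family: 3 versions, in-memory, ROW_INDEX_V1, ROWCOL bloom, 8 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // The other families in the log keep defaults: 1 version, NONE encoding, ROW bloom, 64 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
    }
}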
2024-11-24T03:49:30,369 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420170369Disabling compacts and flushes for region at 1732420170369Disabling writes for close at 1732420170369Writing region close event to WAL at 1732420170369Closed at 1732420170369 2024-11-24T03:49:30,370 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/.initializing 2024-11-24T03:49:30,370 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/WALs/71d8d2d6408d,35951,1732420170088 2024-11-24T03:49:30,373 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C35951%2C1732420170088, suffix=, logDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/WALs/71d8d2d6408d,35951,1732420170088, archiveDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/oldWALs, maxLogs=10 2024-11-24T03:49:30,373 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C35951%2C1732420170088.1732420170373 2024-11-24T03:49:30,382 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/WALs/71d8d2d6408d,35951,1732420170088/71d8d2d6408d%2C35951%2C1732420170088.1732420170373 2024-11-24T03:49:30,387 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38639:38639),(127.0.0.1/127.0.0.1:37621:37621)] 2024-11-24T03:49:30,388 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:49:30,388 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:49:30,388 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,388 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,389 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T03:49:30,391 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:30,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:49:30,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T03:49:30,393 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:30,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:49:30,394 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T03:49:30,395 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:30,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:49:30,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,397 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T03:49:30,397 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:30,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:49:30,398 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,399 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,399 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,400 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,400 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,401 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T03:49:30,402 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:49:30,404 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:49:30,404 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757340, jitterRate=-0.03699341416358948}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T03:49:30,405 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732420170388Initializing all the Stores at 1732420170389 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420170389Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420170389Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420170389Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420170389Cleaning up temporary data from old regions at 1732420170400 (+11 ms)Region opened successfully at 1732420170405 (+5 ms) 2024-11-24T03:49:30,405 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T03:49:30,408 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bce99a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:49:30,409 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T03:49:30,409 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T03:49:30,409 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T03:49:30,409 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T03:49:30,410 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T03:49:30,410 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T03:49:30,410 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T03:49:30,412 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T03:49:30,413 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T03:49:30,436 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T03:49:30,437 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T03:49:30,438 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T03:49:30,445 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T03:49:30,446 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T03:49:30,447 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T03:49:30,453 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T03:49:30,455 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T03:49:30,462 DEBUG 
[master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T03:49:30,464 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T03:49:30,470 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T03:49:30,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:49:30,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:49:30,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,480 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=71d8d2d6408d,35951,1732420170088, sessionid=0x1016c3f841f0000, setting cluster-up flag (Was=false) 2024-11-24T03:49:30,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,520 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T03:49:30,522 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,35951,1732420170088 2024-11-24T03:49:30,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:30,562 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T03:49:30,563 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,35951,1732420170088 2024-11-24T03:49:30,564 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T03:49:30,565 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T03:49:30,566 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T03:49:30,566 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T03:49:30,566 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 71d8d2d6408d,35951,1732420170088 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T03:49:30,567 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:49:30,567 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:49:30,567 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:49:30,567 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:49:30,567 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/71d8d2d6408d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T03:49:30,568 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,568 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:49:30,568 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, 
maxPoolSize=1
2024-11-24T03:49:30,569 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732420200569
2024-11-24T03:49:30,570 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-24T03:49:30,570 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-24T03:49:30,570 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-24T03:49:30,570 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-24T03:49:30,570 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-24T03:49:30,570 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-24T03:49:30,570 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-24T03:49:30,570 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-24T03:49:30,570 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
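The cleaner chores initialized above are plugin driven. A small sketch of setting the equivalent delegates programmatically, assuming the stock hbase.master.logcleaner.plugins and hbase.master.cleaner.interval keys; the class list is simply the one this log reports loading:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Comma-separated delegate classes; this list is the one the log reports initializing.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
          + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner,"
          + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner,"
          + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner");
        // The LogsCleaner chore above runs every 600000 ms; this key is the usual knob for that period.
        conf.setInt("hbase.master.cleaner.interval", 600_000);
        System.out.println(conf.get("hbase.master.logcleaner.plugins"));
    }
}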
2024-11-24T03:49:30,570 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T03:49:30,571 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T03:49:30,571 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T03:49:30,571 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T03:49:30,571 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:30,571 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T03:49:30,571 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420170571,5,FailOnTimeoutGroup] 2024-11-24T03:49:30,571 INFO [PEWorker-2 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T03:49:30,572 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420170571,5,FailOnTimeoutGroup] 2024-11-24T03:49:30,572 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,572 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T03:49:30,572 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,572 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:49:30,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:49:30,579 INFO [PEWorker-2 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T03:49:30,579 INFO [PEWorker-2 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e 2024-11-24T03:49:30,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:49:30,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741832_1008 (size=32) 2024-11-24T03:49:30,588 DEBUG [PEWorker-2 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:49:30,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:49:30,591 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:49:30,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:30,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:49:30,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:49:30,592 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:49:30,592 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:30,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:49:30,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:49:30,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:49:30,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:30,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:49:30,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:49:30,596 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:49:30,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:30,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:49:30,597 DEBUG [PEWorker-2 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:49:30,597 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740 2024-11-24T03:49:30,598 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740 2024-11-24T03:49:30,599 DEBUG [PEWorker-2 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:49:30,599 DEBUG [PEWorker-2 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:49:30,600 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T03:49:30,601 DEBUG [PEWorker-2 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:49:30,606 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:49:30,607 INFO [PEWorker-2 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854275, jitterRate=0.08626705408096313}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:49:30,608 DEBUG [PEWorker-2 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732420170588Initializing all the Stores at 1732420170589 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420170589Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420170589Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420170589Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420170589Cleaning up temporary data from old regions at 1732420170599 (+10 ms)Region opened successfully at 1732420170608 (+9 ms) 2024-11-24T03:49:30,608 DEBUG [PEWorker-2 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:49:30,608 INFO [PEWorker-2 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:49:30,608 DEBUG [PEWorker-2 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:49:30,608 DEBUG [PEWorker-2 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:49:30,608 DEBUG [PEWorker-2 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:49:30,609 INFO [PEWorker-2 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:49:30,609 DEBUG [PEWorker-2 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420170608Disabling compacts and flushes for region at 
1732420170608Disabling writes for close at 1732420170608Writing region close event to WAL at 1732420170608Closed at 1732420170608 2024-11-24T03:49:30,610 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:49:30,610 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T03:49:30,610 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T03:49:30,611 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:49:30,613 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T03:49:30,643 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(746): ClusterId : dd6256f7-9b20-4668-9596-d185cc89ebfb 2024-11-24T03:49:30,643 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T03:49:30,663 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T03:49:30,663 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T03:49:30,671 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T03:49:30,671 DEBUG [RS:0;71d8d2d6408d:46657 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54739124, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:49:30,687 DEBUG [RS:0;71d8d2d6408d:46657 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;71d8d2d6408d:46657 2024-11-24T03:49:30,687 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T03:49:30,687 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T03:49:30,687 DEBUG [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(832): About to register with Master. 
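Earlier in this excerpt (at 03:49:30,572) the master noted that reopening regions with a very high storeFileRefCount is disabled and asked for a threshold > 0 on hbase.regions.recovery.store.file.ref.count. A minimal sketch of switching that recovery chore on in a test Configuration, using the exact key quoted in that log line; the threshold value 3 is an arbitrary illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountRecovery {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables the master chore that reopens regions whose
    // compacted store files are still referenced (see the log message above).
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}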
2024-11-24T03:49:30,688 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(2659): reportForDuty to master=71d8d2d6408d,35951,1732420170088 with port=46657, startcode=1732420170225 2024-11-24T03:49:30,688 DEBUG [RS:0;71d8d2d6408d:46657 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T03:49:30,690 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38823, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T03:49:30,691 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35951 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:30,691 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35951 {}] master.ServerManager(517): Registering regionserver=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:30,692 DEBUG [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e 2024-11-24T03:49:30,692 DEBUG [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39181 2024-11-24T03:49:30,692 DEBUG [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T03:49:30,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:49:30,704 DEBUG [RS:0;71d8d2d6408d:46657 {}] zookeeper.ZKUtil(111): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:30,704 WARN [RS:0;71d8d2d6408d:46657 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T03:49:30,704 INFO [RS:0;71d8d2d6408d:46657 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:49:30,704 DEBUG [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:30,704 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [71d8d2d6408d,46657,1732420170225] 2024-11-24T03:49:30,707 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T03:49:30,710 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T03:49:30,711 INFO [RS:0;71d8d2d6408d:46657 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T03:49:30,711 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
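The MemStoreFlusher line above derives its globalMemStoreLimit=880 M / low mark 836 M from heap fractions, and PressureAwareCompactionThroughputController reports 100 MB/s and 50 MB/s bounds with a 60000 ms tuning period. A rough sketch of the Configuration keys those numbers usually come from; the key names are assumed from current HBase sources rather than printed in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndCompactionThroughput {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap usable by all memstores
    // ("globalMemStoreLimit=880 M" above) and the low-water mark relative to it.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Bounds used by PressureAwareCompactionThroughputController
    // ("higher bound: 100.00 MB/second, lower bound 50.00 MB/second").
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", 0f));
  }
}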
2024-11-24T03:49:30,711 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T03:49:30,712 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T03:49:30,712 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,712 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,713 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:49:30,713 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:49:30,713 DEBUG [RS:0;71d8d2d6408d:46657 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:49:30,713 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
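The executor services started above (RS_OPEN_REGION, RS_CLOSE_REGION, RS_LOG_REPLAY_OPS, and so on) are created with fixed core/max pool sizes. A sketch of how a couple of those pool sizes are typically overridden; the hbase.regionserver.executor.* key names are an assumption based on stock HBase configuration and do not appear in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerExecutorPools {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys: thread counts backing the RS_OPEN_REGION / RS_CLOSE_REGION
    // executors listed above (the log shows corePoolSize=1, maxPoolSize=1 for both).
    conf.setInt("hbase.regionserver.executor.openregion.threads", 1);
    conf.setInt("hbase.regionserver.executor.closeregion.threads", 1);
    System.out.println(conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
  }
}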
2024-11-24T03:49:30,713 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,713 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,713 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,713 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,713 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,46657,1732420170225-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:49:30,729 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T03:49:30,729 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,46657,1732420170225-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,729 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,729 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.Replication(171): 71d8d2d6408d,46657,1732420170225 started 2024-11-24T03:49:30,745 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:30,745 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(1482): Serving as 71d8d2d6408d,46657,1732420170225, RpcServer on 71d8d2d6408d/172.17.0.2:46657, sessionid=0x1016c3f841f0001 2024-11-24T03:49:30,745 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T03:49:30,746 DEBUG [RS:0;71d8d2d6408d:46657 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:30,746 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,46657,1732420170225' 2024-11-24T03:49:30,746 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T03:49:30,746 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T03:49:30,746 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T03:49:30,746 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T03:49:30,746 DEBUG [RS:0;71d8d2d6408d:46657 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:30,746 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,46657,1732420170225' 2024-11-24T03:49:30,746 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T03:49:30,747 DEBUG 
[RS:0;71d8d2d6408d:46657 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T03:49:30,747 DEBUG [RS:0;71d8d2d6408d:46657 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T03:49:30,747 INFO [RS:0;71d8d2d6408d:46657 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T03:49:30,747 INFO [RS:0;71d8d2d6408d:46657 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T03:49:30,763 WARN [71d8d2d6408d:35951 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T03:49:30,849 INFO [RS:0;71d8d2d6408d:46657 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C46657%2C1732420170225, suffix=, logDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225, archiveDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/oldWALs, maxLogs=32 2024-11-24T03:49:30,849 INFO [RS:0;71d8d2d6408d:46657 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C46657%2C1732420170225.1732420170849 2024-11-24T03:49:30,856 INFO [RS:0;71d8d2d6408d:46657 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225/71d8d2d6408d%2C46657%2C1732420170225.1732420170849 2024-11-24T03:49:30,857 DEBUG [RS:0;71d8d2d6408d:46657 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38639:38639),(127.0.0.1/127.0.0.1:37621:37621)] 2024-11-24T03:49:31,013 DEBUG [71d8d2d6408d:35951 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T03:49:31,014 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:31,015 INFO [PEWorker-1 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,46657,1732420170225, state=OPENING 2024-11-24T03:49:31,028 DEBUG [PEWorker-1 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T03:49:31,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:31,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:49:31,037 DEBUG [PEWorker-1 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:49:31,037 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:49:31,037 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:49:31,037 INFO 
[PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,46657,1732420170225}] 2024-11-24T03:49:31,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:31,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:31,190 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T03:49:31,192 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38365, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T03:49:31,196 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T03:49:31,197 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:49:31,198 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C46657%2C1732420170225.meta, suffix=.meta, logDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225, archiveDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/oldWALs, maxLogs=32 2024-11-24T03:49:31,199 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C46657%2C1732420170225.meta.1732420171199.meta 2024-11-24T03:49:31,207 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225/71d8d2d6408d%2C46657%2C1732420170225.meta.1732420171199.meta 2024-11-24T03:49:31,208 DEBUG 
[RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38639:38639),(127.0.0.1/127.0.0.1:37621:37621)] 2024-11-24T03:49:31,209 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:49:31,209 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T03:49:31,210 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T03:49:31,210 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T03:49:31,210 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T03:49:31,210 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:49:31,210 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T03:49:31,210 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T03:49:31,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:49:31,212 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:49:31,212 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:31,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:49:31,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:49:31,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:49:31,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:31,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:49:31,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:49:31,214 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:49:31,214 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:31,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:49:31,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:49:31,215 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:49:31,215 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:31,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:49:31,216 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:49:31,216 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740 2024-11-24T03:49:31,217 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740 2024-11-24T03:49:31,219 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:49:31,219 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:49:31,219 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
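The FlushLargeStoresPolicy message just above falls back to memstore-flush-size divided by the number of families (16.0 M) because the hbase:meta descriptor carries no explicit lower bound. A minimal sketch of how such a bound could be declared on a table descriptor, reusing the per-table key spelled out in that log message; the table and family names are placeholders and the builder calls are the standard HBase client API, not anything taken from this log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBound {
  public static void main(String[] args) {
    // Hypothetical table used only for illustration.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Per-table lower bound consulted by FlushLargeStoresPolicy;
        // key copied from the log message above, value set to 16 MB.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td);
  }
}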
2024-11-24T03:49:31,221 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:49:31,221 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712112, jitterRate=-0.09450320899486542}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:49:31,221 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T03:49:31,222 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732420171210Writing region info on filesystem at 1732420171210Initializing all the Stores at 1732420171211 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420171211Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420171211Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420171211Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420171211Cleaning up temporary data from old regions at 1732420171219 (+8 ms)Running coprocessor post-open hooks at 1732420171221 (+2 ms)Region opened successfully at 1732420171222 (+1 ms) 2024-11-24T03:49:31,223 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732420171190 2024-11-24T03:49:31,226 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T03:49:31,226 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T03:49:31,226 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:31,227 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,46657,1732420170225, state=OPEN 2024-11-24T03:49:31,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:49:31,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:49:31,277 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:31,277 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:49:31,277 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:49:31,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T03:49:31,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,46657,1732420170225 in 240 msec 2024-11-24T03:49:31,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T03:49:31,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 670 msec 2024-11-24T03:49:31,283 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:49:31,283 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T03:49:31,284 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:49:31,284 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,46657,1732420170225, seqNum=-1] 2024-11-24T03:49:31,285 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:49:31,286 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47239, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:49:31,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 726 msec 2024-11-24T03:49:31,292 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732420171292, completionTime=-1 2024-11-24T03:49:31,292 INFO 
[master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T03:49:31,292 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T03:49:31,294 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T03:49:31,295 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732420231294 2024-11-24T03:49:31,295 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732420291295 2024-11-24T03:49:31,295 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T03:49:31,295 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35951,1732420170088-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:31,295 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35951,1732420170088-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:31,295 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35951,1732420170088-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:31,295 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-71d8d2d6408d:35951, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:31,295 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:31,295 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:31,297 DEBUG [master/71d8d2d6408d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T03:49:31,300 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.020sec 2024-11-24T03:49:31,300 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T03:49:31,300 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T03:49:31,300 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T03:49:31,300 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
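The master lines above report quota support and slow/large request logging to hbase:slowlog as disabled. A sketch of enabling both in a test Configuration; hbase.quota.enabled is the standard quota switch, while hbase.regionserver.slowlog.systable.enabled is assumed from current HBase sources rather than taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaAndSlowLog {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lets MasterQuotaManager start instead of logging "Quota support disabled".
    conf.setBoolean("hbase.quota.enabled", true);
    // Assumed key: persist slow/large request records to the hbase:slowlog table
    // instead of "Slow/Large requests logging ... is disabled. Quitting."
    conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true);
    System.out.println(conf.getBoolean("hbase.quota.enabled", false));
  }
}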
2024-11-24T03:49:31,300 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T03:49:31,300 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35951,1732420170088-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:49:31,300 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35951,1732420170088-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T03:49:31,302 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T03:49:31,302 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T03:49:31,302 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,35951,1732420170088-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:49:31,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33e6fa5b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:49:31,344 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 71d8d2d6408d,35951,-1 for getting cluster id 2024-11-24T03:49:31,344 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T03:49:31,345 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dd6256f7-9b20-4668-9596-d185cc89ebfb' 2024-11-24T03:49:31,346 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T03:49:31,346 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dd6256f7-9b20-4668-9596-d185cc89ebfb" 2024-11-24T03:49:31,346 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4488f2e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:49:31,346 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [71d8d2d6408d,35951,-1] 2024-11-24T03:49:31,346 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T03:49:31,347 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:49:31,348 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56920, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T03:49:31,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@621c6e57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:49:31,349 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:49:31,350 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,46657,1732420170225, seqNum=-1] 2024-11-24T03:49:31,350 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:49:31,352 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55264, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:49:31,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=71d8d2d6408d,35951,1732420170088 2024-11-24T03:49:31,354 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:49:31,356 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T03:49:31,356 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T03:49:31,357 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 71d8d2d6408d,35951,1732420170088 2024-11-24T03:49:31,357 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4c455bd6 2024-11-24T03:49:31,358 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T03:49:31,359 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T03:49:31,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35951 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T03:49:31,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35951 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-24T03:49:31,360 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35951 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:49:31,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35951 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-24T03:49:31,362 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T03:49:31,362 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:31,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35951 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-24T03:49:31,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T03:49:31,364 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T03:49:31,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741835_1011 (size=381) 2024-11-24T03:49:31,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741835_1011 (size=381) 2024-11-24T03:49:31,373 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 18651f7951b24351b6deb47b7480bab7, NAME => 'TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e 2024-11-24T03:49:31,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741836_1012 (size=64) 2024-11-24T03:49:31,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741836_1012 (size=64) 2024-11-24T03:49:31,379 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:49:31,380 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 18651f7951b24351b6deb47b7480bab7, disabling compactions & flushes 2024-11-24T03:49:31,380 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 2024-11-24T03:49:31,380 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 2024-11-24T03:49:31,380 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. after waiting 0 ms 2024-11-24T03:49:31,380 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 2024-11-24T03:49:31,380 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 2024-11-24T03:49:31,380 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 18651f7951b24351b6deb47b7480bab7: Waiting for close lock at 1732420171379Disabling compacts and flushes for region at 1732420171379Disabling writes for close at 1732420171380 (+1 ms)Writing region close event to WAL at 1732420171380Closed at 1732420171380 2024-11-24T03:49:31,381 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T03:49:31,382 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732420171381"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732420171381"}]},"ts":"1732420171381"} 2024-11-24T03:49:31,384 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T03:49:31,385 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T03:49:31,386 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732420171385"}]},"ts":"1732420171385"} 2024-11-24T03:49:31,388 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-24T03:49:31,388 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=18651f7951b24351b6deb47b7480bab7, ASSIGN}] 2024-11-24T03:49:31,389 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=18651f7951b24351b6deb47b7480bab7, ASSIGN 2024-11-24T03:49:31,390 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=18651f7951b24351b6deb47b7480bab7, ASSIGN; state=OFFLINE, location=71d8d2d6408d,46657,1732420170225; forceNewPlan=false, retain=false 2024-11-24T03:49:31,541 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=18651f7951b24351b6deb47b7480bab7, regionState=OPENING, regionLocation=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:31,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=18651f7951b24351b6deb47b7480bab7, ASSIGN because future has completed 2024-11-24T03:49:31,544 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225}] 2024-11-24T03:49:31,701 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 
2024-11-24T03:49:31,701 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 18651f7951b24351b6deb47b7480bab7, NAME => 'TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:49:31,701 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,701 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:49:31,701 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,701 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,703 INFO [StoreOpener-18651f7951b24351b6deb47b7480bab7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,704 INFO [StoreOpener-18651f7951b24351b6deb47b7480bab7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 18651f7951b24351b6deb47b7480bab7 columnFamilyName info 2024-11-24T03:49:31,704 DEBUG [StoreOpener-18651f7951b24351b6deb47b7480bab7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:31,705 INFO [StoreOpener-18651f7951b24351b6deb47b7480bab7-1 {}] regionserver.HStore(327): Store=18651f7951b24351b6deb47b7480bab7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:49:31,705 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,706 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,706 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,706 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,706 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,708 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,710 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:49:31,710 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 18651f7951b24351b6deb47b7480bab7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874925, jitterRate=0.11252550780773163}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T03:49:31,710 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:31,711 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 18651f7951b24351b6deb47b7480bab7: Running coprocessor pre-open hook at 1732420171702Writing region info on filesystem at 1732420171702Initializing all the Stores at 1732420171702Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420171703 (+1 ms)Cleaning up temporary data from old regions at 1732420171706 (+3 ms)Running coprocessor post-open hooks at 1732420171710 (+4 ms)Region opened successfully at 1732420171711 (+1 ms) 2024-11-24T03:49:31,712 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., pid=6, masterSystemTime=1732420171697 2024-11-24T03:49:31,714 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 
2024-11-24T03:49:31,714 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 2024-11-24T03:49:31,715 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=18651f7951b24351b6deb47b7480bab7, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:31,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225 because future has completed 2024-11-24T03:49:31,720 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T03:49:31,720 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225 in 174 msec 2024-11-24T03:49:31,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T03:49:31,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=18651f7951b24351b6deb47b7480bab7, ASSIGN in 332 msec 2024-11-24T03:49:31,723 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T03:49:31,723 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732420171723"}]},"ts":"1732420171723"} 2024-11-24T03:49:31,725 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-24T03:49:31,727 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T03:49:31,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 367 msec 2024-11-24T03:49:32,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:32,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:32,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:32,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:33,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:33,238 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T03:49:33,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,241 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,241 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:33,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:34,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:34,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:35,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:35,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:36,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:36,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:36,707 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T03:49:36,708 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-24T03:49:37,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:37,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:37,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-24T03:49:37,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T03:49:37,407 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T03:49:38,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:38,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:39,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:39,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:40,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:40,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:41,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:41,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:41,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T03:49:41,442 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-24T03:49:41,442 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-24T03:49:41,444 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-24T03:49:41,444 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 
2024-11-24T03:49:41,447 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., hostname=71d8d2d6408d,46657,1732420170225, seqNum=2]
2024-11-24T03:49:41,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on 18651f7951b24351b6deb47b7480bab7
2024-11-24T03:49:41,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 18651f7951b24351b6deb47b7480bab7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-24T03:49:41,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/877fe244022044d687a440e890a7f12c is 1080, key is row0001/info:/1732420181448/Put/seqid=0
2024-11-24T03:49:41,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741837_1013 (size=12509)
2024-11-24T03:49:41,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741837_1013 (size=12509)
2024-11-24T03:49:41,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/877fe244022044d687a440e890a7f12c
2024-11-24T03:49:41,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/877fe244022044d687a440e890a7f12c as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/877fe244022044d687a440e890a7f12c
2024-11-24T03:49:41,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/877fe244022044d687a440e890a7f12c, entries=7, sequenceid=11, filesize=12.2 K
2024-11-24T03:49:41,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 18651f7951b24351b6deb47b7480bab7 in 42ms, sequenceid=11, compaction requested=false
2024-11-24T03:49:41,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 18651f7951b24351b6deb47b7480bab7:
2024-11-24T03:49:41,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on 18651f7951b24351b6deb47b7480bab7
2024-11-24T03:49:41,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 18651f7951b24351b6deb47b7480bab7 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB
2024-11-24T03:49:41,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/ddf1212cf8174987825481aaf241da52 is 1080, key is row0008/info:/1732420181462/Put/seqid=0 2024-11-24T03:49:41,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741838_1014 (size=26530) 2024-11-24T03:49:41,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741838_1014 (size=26530) 2024-11-24T03:49:41,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/ddf1212cf8174987825481aaf241da52 2024-11-24T03:49:41,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/ddf1212cf8174987825481aaf241da52 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/ddf1212cf8174987825481aaf241da52 2024-11-24T03:49:41,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/ddf1212cf8174987825481aaf241da52, entries=20, sequenceid=34, filesize=25.9 K 2024-11-24T03:49:41,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=5.25 KB/5380 for 18651f7951b24351b6deb47b7480bab7 in 27ms, sequenceid=34, compaction requested=false 2024-11-24T03:49:41,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 18651f7951b24351b6deb47b7480bab7: 2024-11-24T03:49:41,530 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=38.1 K, sizeToCheck=16.0 K 2024-11-24T03:49:41,530 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:41,530 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/ddf1212cf8174987825481aaf241da52 because midkey is the same as first or last row 2024-11-24T03:49:42,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:42,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T03:49:42,910 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-24T03:49:42,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:42,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T03:49:43,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-24T03:49:43,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:49:43,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:43,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 18651f7951b24351b6deb47b7480bab7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:49:43,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/14c4c0a94df948d6be0f1e6dcce1d73d is 1080, key is row0028/info:/1732420181505/Put/seqid=0 2024-11-24T03:49:43,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741839_1015 (size=12509) 2024-11-24T03:49:43,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741839_1015 (size=12509) 2024-11-24T03:49:43,556 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/14c4c0a94df948d6be0f1e6dcce1d73d 2024-11-24T03:49:43,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/14c4c0a94df948d6be0f1e6dcce1d73d as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/14c4c0a94df948d6be0f1e6dcce1d73d 2024-11-24T03:49:43,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/14c4c0a94df948d6be0f1e6dcce1d73d, entries=7, sequenceid=44, filesize=12.2 K 2024-11-24T03:49:43,576 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 18651f7951b24351b6deb47b7480bab7 in 54ms, sequenceid=44, compaction requested=true 2024-11-24T03:49:43,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 18651f7951b24351b6deb47b7480bab7: 2024-11-24T03:49:43,576 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=50.3 K, sizeToCheck=16.0 K 2024-11-24T03:49:43,576 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:43,576 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/ddf1212cf8174987825481aaf241da52 because midkey is the same as first or last row 2024-11-24T03:49:43,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:43,582 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18651f7951b24351b6deb47b7480bab7:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:49:43,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:49:43,583 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:49:43,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 18651f7951b24351b6deb47b7480bab7 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-11-24T03:49:43,585 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 51548 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:49:43,585 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1541): 18651f7951b24351b6deb47b7480bab7/info is initiating minor compaction (all files) 2024-11-24T03:49:43,585 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 18651f7951b24351b6deb47b7480bab7/info in TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 2024-11-24T03:49:43,585 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/877fe244022044d687a440e890a7f12c, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/ddf1212cf8174987825481aaf241da52, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/14c4c0a94df948d6be0f1e6dcce1d73d] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp, totalSize=50.3 K 2024-11-24T03:49:43,586 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 877fe244022044d687a440e890a7f12c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732420181448 2024-11-24T03:49:43,587 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting ddf1212cf8174987825481aaf241da52, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732420181462 2024-11-24T03:49:43,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/fbbc30133bb640ad89aeb7761c45845c is 1080, key is row0035/info:/1732420183524/Put/seqid=0 2024-11-24T03:49:43,591 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 14c4c0a94df948d6be0f1e6dcce1d73d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732420181505 
2024-11-24T03:49:43,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46657 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-24T03:49:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46657 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:55264 deadline: 1732420193610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225
2024-11-24T03:49:43,617 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18651f7951b24351b6deb47b7480bab7#info#compaction#58 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:49:43,618 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/4fa4a103492745b6a97d842e603f3975 is 1080, key is row0001/info:/1732420181448/Put/seqid=0 2024-11-24T03:49:43,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741840_1016 (size=27607) 2024-11-24T03:49:43,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741840_1016 (size=27607) 2024-11-24T03:49:43,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=68 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/fbbc30133bb640ad89aeb7761c45845c 2024-11-24T03:49:43,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/fbbc30133bb640ad89aeb7761c45845c as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/fbbc30133bb640ad89aeb7761c45845c 2024-11-24T03:49:43,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/fbbc30133bb640ad89aeb7761c45845c, entries=21, sequenceid=68, filesize=27.0 K 2024-11-24T03:49:43,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=8.41 KB/8608 for 18651f7951b24351b6deb47b7480bab7 in 57ms, sequenceid=68, compaction requested=false 2024-11-24T03:49:43,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 18651f7951b24351b6deb47b7480bab7: 2024-11-24T03:49:43,641 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.3 K, sizeToCheck=16.0 K 2024-11-24T03:49:43,641 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:43,641 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/fbbc30133bb640ad89aeb7761c45845c because midkey is the same as first or last row 2024-11-24T03:49:43,641 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., hostname=71d8d2d6408d,46657,1732420170225, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., hostname=71d8d2d6408d,46657,1732420170225, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T03:49:43,642 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., hostname=71d8d2d6408d,46657,1732420170225, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T03:49:43,642 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., hostname=71d8d2d6408d,46657,1732420170225, seqNum=2 because the exception is null or not the one we care about 2024-11-24T03:49:43,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741841_1017 (size=41747) 2024-11-24T03:49:43,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741841_1017 (size=41747) 2024-11-24T03:49:43,650 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/4fa4a103492745b6a97d842e603f3975 as 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/4fa4a103492745b6a97d842e603f3975 2024-11-24T03:49:43,657 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 18651f7951b24351b6deb47b7480bab7/info of 18651f7951b24351b6deb47b7480bab7 into 4fa4a103492745b6a97d842e603f3975(size=40.8 K), total size for store is 67.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 18651f7951b24351b6deb47b7480bab7: 2024-11-24T03:49:43,657 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., storeName=18651f7951b24351b6deb47b7480bab7/info, priority=13, startTime=1732420183576; duration=0sec 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/4fa4a103492745b6a97d842e603f3975 because midkey is the same as first or last row 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/4fa4a103492745b6a97d842e603f3975 because midkey is the same as first or last row 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/4fa4a103492745b6a97d842e603f3975 because midkey is the same as first or last row 2024-11-24T03:49:43,657 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:49:43,657 DEBUG 
[RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18651f7951b24351b6deb47b7480bab7:info 2024-11-24T03:49:44,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:44,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:45,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:45,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:46,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:46,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:47,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:47,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:48,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:48,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:49,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:49,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:50,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:50,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:51,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:51,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:52,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:52,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:53,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:53,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:53,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 18651f7951b24351b6deb47b7480bab7 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-24T03:49:53,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/48b793cddf214d618049025d4b35ca76 is 1080, key is row0056/info:/1732420183583/Put/seqid=0 2024-11-24T03:49:53,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741842_1018 (size=14663) 2024-11-24T03:49:53,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741842_1018 (size=14663) 2024-11-24T03:49:53,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/48b793cddf214d618049025d4b35ca76 2024-11-24T03:49:53,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/48b793cddf214d618049025d4b35ca76 as 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/48b793cddf214d618049025d4b35ca76 2024-11-24T03:49:53,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/48b793cddf214d618049025d4b35ca76, entries=9, sequenceid=81, filesize=14.3 K 2024-11-24T03:49:53,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 18651f7951b24351b6deb47b7480bab7 in 25ms, sequenceid=81, compaction requested=true 2024-11-24T03:49:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 18651f7951b24351b6deb47b7480bab7: 2024-11-24T03:49:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.0 K, sizeToCheck=16.0 K 2024-11-24T03:49:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:53,697 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/4fa4a103492745b6a97d842e603f3975 because midkey is the same as first or last row 2024-11-24T03:49:53,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18651f7951b24351b6deb47b7480bab7:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:49:53,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:49:53,698 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:49:53,699 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84017 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:49:53,699 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1541): 18651f7951b24351b6deb47b7480bab7/info is initiating minor compaction (all files) 2024-11-24T03:49:53,699 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 18651f7951b24351b6deb47b7480bab7/info in TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 
2024-11-24T03:49:53,699 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/4fa4a103492745b6a97d842e603f3975, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/fbbc30133bb640ad89aeb7761c45845c, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/48b793cddf214d618049025d4b35ca76] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp, totalSize=82.0 K 2024-11-24T03:49:53,699 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4fa4a103492745b6a97d842e603f3975, keycount=34, bloomtype=ROW, size=40.8 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732420181448 2024-11-24T03:49:53,700 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting fbbc30133bb640ad89aeb7761c45845c, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=68, earliestPutTs=1732420183524 2024-11-24T03:49:53,700 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 48b793cddf214d618049025d4b35ca76, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732420183583 2024-11-24T03:49:53,714 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18651f7951b24351b6deb47b7480bab7#info#compaction#60 average throughput is 32.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:49:53,714 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/d41158420ada4abda01a373cc43607fe is 1080, key is row0001/info:/1732420181448/Put/seqid=0 2024-11-24T03:49:53,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741843_1019 (size=74301) 2024-11-24T03:49:53,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741843_1019 (size=74301) 2024-11-24T03:49:53,727 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/.tmp/info/d41158420ada4abda01a373cc43607fe as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/d41158420ada4abda01a373cc43607fe 2024-11-24T03:49:53,734 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 18651f7951b24351b6deb47b7480bab7/info of 18651f7951b24351b6deb47b7480bab7 into d41158420ada4abda01a373cc43607fe(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:49:53,734 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 18651f7951b24351b6deb47b7480bab7: 2024-11-24T03:49:53,734 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., storeName=18651f7951b24351b6deb47b7480bab7/info, priority=13, startTime=1732420193697; duration=0sec 2024-11-24T03:49:53,734 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-24T03:49:53,734 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:53,734 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-24T03:49:53,734 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:53,734 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-24T03:49:53,734 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T03:49:53,735 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:49:53,735 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:49:53,735 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18651f7951b24351b6deb47b7480bab7:info 2024-11-24T03:49:53,737 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35951 {}] assignment.AssignmentManager(1363): Split request from 71d8d2d6408d,46657,1732420170225, parent={ENCODED => 18651f7951b24351b6deb47b7480bab7, NAME => 'TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-24T03:49:53,743 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35951 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:53,747 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35951 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=18651f7951b24351b6deb47b7480bab7, daughterA=3f0ba190cb5e13cac46a5e798061202a, daughterB=ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:53,748 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=18651f7951b24351b6deb47b7480bab7, daughterA=3f0ba190cb5e13cac46a5e798061202a, daughterB=ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:53,748 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=18651f7951b24351b6deb47b7480bab7, daughterA=3f0ba190cb5e13cac46a5e798061202a, daughterB=ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:53,748 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=18651f7951b24351b6deb47b7480bab7, daughterA=3f0ba190cb5e13cac46a5e798061202a, daughterB=ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:53,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=18651f7951b24351b6deb47b7480bab7, UNASSIGN}] 2024-11-24T03:49:53,756 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=18651f7951b24351b6deb47b7480bab7, UNASSIGN 2024-11-24T03:49:53,758 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=18651f7951b24351b6deb47b7480bab7, regionState=CLOSING, regionLocation=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:53,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=18651f7951b24351b6deb47b7480bab7, UNASSIGN because future has completed 2024-11-24T03:49:53,761 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-24T03:49:53,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225}] 2024-11-24T03:49:53,918 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:53,918 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-24T03:49:53,919 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 18651f7951b24351b6deb47b7480bab7, disabling compactions & flushes 2024-11-24T03:49:53,919 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 2024-11-24T03:49:53,919 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 2024-11-24T03:49:53,919 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. after waiting 0 ms 2024-11-24T03:49:53,919 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 
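The close sequence above ("Time limited wait for close lock ... Acquired close lock ... after waiting 0 ms ... Updates disabled") follows a common pattern: normal writes hold a shared lock, and close takes the exclusive lock with a bounded wait so in-flight operations can drain without blocking the close forever. Below is only a minimal plain-Java sketch of that pattern around a hypothetical CloseableStore class; it is not HBase's HRegion code.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical illustration of a time-limited "close lock": mutations take the read
// lock, close() takes the write lock with a bounded wait before disabling updates.
public class CloseableStore {
    private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    private volatile boolean closed = false;

    /** Normal mutation path: proceeds only while the store is not closing. */
    public void put(String row) {
        closeLock.readLock().lock();
        try {
            if (closed) {
                throw new IllegalStateException("store is closed");
            }
            // ... apply the edit ...
        } finally {
            closeLock.readLock().unlock();
        }
    }

    /** Close path: waits at most timeoutMs for in-flight operations to drain. */
    public boolean close(long timeoutMs) throws InterruptedException {
        long start = System.nanoTime();
        if (!closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
            return false; // could not fence writers in time
        }
        try {
            closed = true; // "Updates disabled for region ..."
            long waitedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
            System.out.println("Acquired close lock after waiting " + waitedMs + " ms");
            // ... flush / release resources ...
            return true;
        } finally {
            closeLock.writeLock().unlock();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        CloseableStore store = new CloseableStore();
        store.put("row0001");
        System.out.println("closed=" + store.close(1000));
    }
}
```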
2024-11-24T03:49:53,920 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/877fe244022044d687a440e890a7f12c, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/ddf1212cf8174987825481aaf241da52, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/4fa4a103492745b6a97d842e603f3975, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/14c4c0a94df948d6be0f1e6dcce1d73d, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/fbbc30133bb640ad89aeb7761c45845c, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/48b793cddf214d618049025d4b35ca76] to archive 2024-11-24T03:49:53,921 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T03:49:53,923 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/877fe244022044d687a440e890a7f12c to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/877fe244022044d687a440e890a7f12c 2024-11-24T03:49:53,924 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/ddf1212cf8174987825481aaf241da52 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/ddf1212cf8174987825481aaf241da52 2024-11-24T03:49:53,925 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/4fa4a103492745b6a97d842e603f3975 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/4fa4a103492745b6a97d842e603f3975 2024-11-24T03:49:53,927 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/14c4c0a94df948d6be0f1e6dcce1d73d to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/14c4c0a94df948d6be0f1e6dcce1d73d 2024-11-24T03:49:53,928 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/fbbc30133bb640ad89aeb7761c45845c to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/fbbc30133bb640ad89aeb7761c45845c 2024-11-24T03:49:53,929 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/48b793cddf214d618049025d4b35ca76 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/48b793cddf214d618049025d4b35ca76 2024-11-24T03:49:53,935 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1 2024-11-24T03:49:53,936 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 
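Each "Archived from FileableStoreFile" entry above moves a compacted store file out of the table's data directory into a mirrored location under archive/, keeping the namespace/table/region/family layout intact, as the before/after paths in the log show. A minimal sketch of that path mapping, assuming a root directory layout like the one in these logs (data/<ns>/<table>/<region>/<cf>/<hfile> mapped to archive/data/...); it is not the real HFileArchiver.

```java
import java.nio.file.Path;
import java.nio.file.Paths;

// Hypothetical sketch: derive the archive location for a store file by prefixing the
// relative "data/..." path with "archive/", preserving the rest of the layout.
public class ArchivePathMapper {

    static Path toArchivePath(Path rootDir, Path storeFile) {
        Path relative = rootDir.relativize(storeFile); // data/ns/table/region/cf/hfile
        if (!relative.getName(0).toString().equals("data")) {
            throw new IllegalArgumentException("not under the data directory: " + storeFile);
        }
        return rootDir.resolve("archive").resolve(relative); // archive/data/ns/table/region/cf/hfile
    }

    public static void main(String[] args) {
        Path root = Paths.get("/user/jenkins/test-data/bb296256");
        Path hfile = root.resolve(
            "data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/877fe244022044d687a440e890a7f12c");
        // Prints .../archive/data/default/TestLogRolling-testLogRolling/.../info/877fe2...
        System.out.println(toArchivePath(root, hfile));
    }
}
```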
2024-11-24T03:49:53,936 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 18651f7951b24351b6deb47b7480bab7: Waiting for close lock at 1732420193919Running coprocessor pre-close hooks at 1732420193919Disabling compacts and flushes for region at 1732420193919Disabling writes for close at 1732420193919Writing region close event to WAL at 1732420193932 (+13 ms)Running coprocessor post-close hooks at 1732420193936 (+4 ms)Closed at 1732420193936 2024-11-24T03:49:53,939 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:53,939 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=18651f7951b24351b6deb47b7480bab7, regionState=CLOSED 2024-11-24T03:49:53,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225 because future has completed 2024-11-24T03:49:53,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-24T03:49:53,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 18651f7951b24351b6deb47b7480bab7, server=71d8d2d6408d,46657,1732420170225 in 182 msec 2024-11-24T03:49:53,948 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T03:49:53,948 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=18651f7951b24351b6deb47b7480bab7, UNASSIGN in 191 msec 2024-11-24T03:49:53,956 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:53,959 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=18651f7951b24351b6deb47b7480bab7, threads=1 2024-11-24T03:49:53,961 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/d41158420ada4abda01a373cc43607fe for region: 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:53,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741844_1020 (size=27) 2024-11-24T03:49:53,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741844_1020 (size=27) 2024-11-24T03:49:53,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741845_1021 (size=27) 2024-11-24T03:49:53,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741845_1021 (size=27) 2024-11-24T03:49:53,988 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 
splitting complete for store file: hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/d41158420ada4abda01a373cc43607fe for region: 18651f7951b24351b6deb47b7480bab7 2024-11-24T03:49:53,990 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 18651f7951b24351b6deb47b7480bab7 Daughter A: [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7] storefiles, Daughter B: [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7] storefiles. 2024-11-24T03:49:54,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741846_1022 (size=71) 2024-11-24T03:49:54,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741846_1022 (size=71) 2024-11-24T03:49:54,006 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:54,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741847_1023 (size=71) 2024-11-24T03:49:54,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741847_1023 (size=71) 2024-11-24T03:49:54,019 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:54,028 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-24T03:49:54,031 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-24T03:49:54,033 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732420194033"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732420194033"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732420194033"}]},"ts":"1732420194033"} 2024-11-24T03:49:54,033 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732420194033"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732420194033"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732420194033"}]},"ts":"1732420194033"} 
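The split above does not rewrite the parent's HFile: each daughter directory receives a reference file whose name is the parent file name plus "." plus the parent region's encoded name (d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7), with one daughter reading the bottom half of the parent file (keys below the split key row0062) and the other the top half. The following is a plain-Java sketch of just that naming and key-range convention under those assumptions; the real reference file also persists the split key and half marker in its contents.

```java
// Hypothetical sketch of the "<hfile>.<parentEncodedRegion>" reference naming seen above.
public class SplitReference {

    enum Half { BOTTOM, TOP } // daughter A reads the bottom half, daughter B the top half

    final String parentHFileName;     // e.g. d41158420ada4abda01a373cc43607fe
    final String parentEncodedRegion; // e.g. 18651f7951b24351b6deb47b7480bab7
    final String splitKey;            // e.g. row0062
    final Half half;

    SplitReference(String parentHFileName, String parentEncodedRegion, String splitKey, Half half) {
        this.parentHFileName = parentHFileName;
        this.parentEncodedRegion = parentEncodedRegion;
        this.splitKey = splitKey;
        this.half = half;
    }

    /** File name placed in the daughter's column-family directory. */
    String referenceFileName() {
        return parentHFileName + "." + parentEncodedRegion;
    }

    /** Whether a row key is served by this half of the parent file. */
    boolean contains(String row) {
        return half == Half.BOTTOM ? row.compareTo(splitKey) < 0
                                   : row.compareTo(splitKey) >= 0;
    }

    public static void main(String[] args) {
        SplitReference top = new SplitReference(
            "d41158420ada4abda01a373cc43607fe", "18651f7951b24351b6deb47b7480bab7", "row0062", Half.TOP);
        System.out.println(top.referenceFileName()); // d41158420ada4abda01a373cc43607fe.18651f79...
        System.out.println(top.contains("row0001")); // false: below the split key
        System.out.println(top.contains("row0099")); // true: at or above the split key
    }
}
```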
2024-11-24T03:49:54,034 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732420194033"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732420194033"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732420194033"}]},"ts":"1732420194033"} 2024-11-24T03:49:54,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f0ba190cb5e13cac46a5e798061202a, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae871af58eaac429b1e8eee60d9d32a1, ASSIGN}] 2024-11-24T03:49:54,054 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f0ba190cb5e13cac46a5e798061202a, ASSIGN 2024-11-24T03:49:54,054 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae871af58eaac429b1e8eee60d9d32a1, ASSIGN 2024-11-24T03:49:54,055 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae871af58eaac429b1e8eee60d9d32a1, ASSIGN; state=SPLITTING_NEW, location=71d8d2d6408d,46657,1732420170225; forceNewPlan=false, retain=false 2024-11-24T03:49:54,055 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f0ba190cb5e13cac46a5e798061202a, ASSIGN; state=SPLITTING_NEW, location=71d8d2d6408d,46657,1732420170225; forceNewPlan=false, retain=false 2024-11-24T03:49:54,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:54,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:54,207 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=ae871af58eaac429b1e8eee60d9d32a1, regionState=OPENING, regionLocation=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:54,208 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3f0ba190cb5e13cac46a5e798061202a, regionState=OPENING, regionLocation=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:54,210 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae871af58eaac429b1e8eee60d9d32a1, ASSIGN because future has completed 2024-11-24T03:49:54,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae871af58eaac429b1e8eee60d9d32a1, server=71d8d2d6408d,46657,1732420170225}] 2024-11-24T03:49:54,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f0ba190cb5e13cac46a5e798061202a, ASSIGN because future has completed 2024-11-24T03:49:54,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3f0ba190cb5e13cac46a5e798061202a, server=71d8d2d6408d,46657,1732420170225}] 2024-11-24T03:49:54,366 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 
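The two WARN stack traces above come from a reflective call: RecoverLeaseFSUtils invokes isFileClosed via Method.invoke, so the real failure (java.io.IOException: Filesystem closed) arrives wrapped in an InvocationTargetException and has to be read from getCause(). A self-contained sketch of that unwrapping pattern using a hypothetical stand-in client class; it is not the HBase utility itself.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Hypothetical illustration: calling an optional method reflectively and unwrapping the
// real cause when the target throws, as in the "Failed invocation ..." WARNs above.
public class ReflectiveIsFileClosed {

    /** Stand-in for a filesystem client whose method may or may not exist at runtime. */
    public static class FakeClient {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed"); // simulate the logged cause
        }
    }

    static Boolean tryIsFileClosed(Object client, String path) {
        try {
            Method m = client.getClass().getMethod("isFileClosed", String.class);
            return (Boolean) m.invoke(client, path);
        } catch (NoSuchMethodException e) {
            return null; // method not available on this client; caller falls back
        } catch (InvocationTargetException e) {
            // The interesting exception is the cause, not the reflective wrapper.
            System.err.println("Failed invocation for " + path + ": " + e.getCause());
            return null;
        } catch (IllegalAccessException e) {
            throw new IllegalStateException(e);
        }
    }

    public static void main(String[] args) {
        System.out.println(tryIsFileClosed(new FakeClient(), "/WALs/example.meta"));
    }
}
```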
2024-11-24T03:49:54,366 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => ae871af58eaac429b1e8eee60d9d32a1, NAME => 'TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-24T03:49:54,366 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,367 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:49:54,367 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,367 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,368 INFO [StoreOpener-ae871af58eaac429b1e8eee60d9d32a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,369 INFO [StoreOpener-ae871af58eaac429b1e8eee60d9d32a1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ae871af58eaac429b1e8eee60d9d32a1 columnFamilyName info 2024-11-24T03:49:54,369 DEBUG [StoreOpener-ae871af58eaac429b1e8eee60d9d32a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:54,379 DEBUG [StoreOpener-ae871af58eaac429b1e8eee60d9d32a1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7->hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/d41158420ada4abda01a373cc43607fe-top 2024-11-24T03:49:54,380 INFO [StoreOpener-ae871af58eaac429b1e8eee60d9d32a1-1 {}] regionserver.HStore(327): Store=ae871af58eaac429b1e8eee60d9d32a1/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:49:54,380 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,381 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,382 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,382 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,382 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,384 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,385 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened ae871af58eaac429b1e8eee60d9d32a1; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=839889, jitterRate=0.06797502934932709}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T03:49:54,385 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:49:54,385 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for ae871af58eaac429b1e8eee60d9d32a1: Running coprocessor pre-open hook at 1732420194367Writing region info on filesystem at 1732420194367Initializing all the Stores at 1732420194368 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420194368Cleaning up temporary data from old regions at 1732420194382 (+14 ms)Running coprocessor post-open hooks at 1732420194385 (+3 ms)Region opened successfully at 1732420194385 2024-11-24T03:49:54,386 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., pid=12, masterSystemTime=1732420194362 2024-11-24T03:49:54,386 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add 
compact mark for store ae871af58eaac429b1e8eee60d9d32a1:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:49:54,386 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:49:54,386 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-24T03:49:54,387 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:49:54,387 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1541): ae871af58eaac429b1e8eee60d9d32a1/info is initiating minor compaction (all files) 2024-11-24T03:49:54,387 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae871af58eaac429b1e8eee60d9d32a1/info in TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:49:54,387 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7->hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/d41158420ada4abda01a373cc43607fe-top] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp, totalSize=72.6 K 2024-11-24T03:49:54,388 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732420181448 2024-11-24T03:49:54,388 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:49:54,389 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:49:54,389 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 
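The split decision earlier in this log ("Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K", regionsWithCommonTable=1) together with the policy toString printed at region open (initialSize=16384 and a jittered desiredMaxFileSize) points at the usual increasing-to-upper-bound rule: the effective threshold is the smaller of the configured max file size and initialSize times the cube of the number of same-table regions on the server. The block below is a rough plain-Java sketch of that rule under those assumptions, not the actual HBase split-policy class.

```java
// Hypothetical sketch of an "increasing to upper bound" split check:
// threshold = min(desiredMaxFileSize, initialSize * n^3); split when the store exceeds it.
public class SplitSizeCheck {

    static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionsWithCommonTable) {
        if (regionsWithCommonTable <= 0) {
            return desiredMaxFileSize;
        }
        long n = regionsWithCommonTable;
        long growing = initialSize * n * n * n; // grows with the cube of the region count
        return Math.min(desiredMaxFileSize, growing);
    }

    static boolean shouldSplit(long storeSize, long initialSize, long desiredMaxFileSize, int regions) {
        return storeSize > sizeToCheck(initialSize, desiredMaxFileSize, regions);
    }

    public static void main(String[] args) {
        long initialSize = 16_384;  // 16 K, as printed at region open
        long desiredMax = 839_889;  // jittered max file size from the log
        long storeSize = 74_301;    // ~72.6 K store after the compaction above
        System.out.println("sizeToCheck=" + sizeToCheck(initialSize, desiredMax, 1)); // 16384
        System.out.println("shouldSplit=" + shouldSplit(storeSize, initialSize, desiredMax, 1)); // true
    }
}
```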
2024-11-24T03:49:54,389 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 3f0ba190cb5e13cac46a5e798061202a, NAME => 'TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-24T03:49:54,389 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,389 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:49:54,389 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,389 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,390 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=ae871af58eaac429b1e8eee60d9d32a1, regionState=OPEN, openSeqNum=86, regionLocation=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:54,392 INFO [StoreOpener-3f0ba190cb5e13cac46a5e798061202a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,392 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-24T03:49:54,392 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
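The "Since none of the CFs were above the size, flushing all." line reflects a per-family flush policy: when at least one column family's memstore exceeds a lower bound, only those families are flushed; otherwise the whole region (all families) is flushed. A minimal sketch of that selection logic, assuming a simple map of family name to memstore size; it is not the HBase FlushAllLargeStoresPolicy implementation.

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch: pick the column families to flush. Families over the lower bound
// are flushed selectively; if none qualifies, flush everything.
public class FlushFamilySelector {

    static List<String> familiesToFlush(Map<String, Long> memstoreSizes, long flushSizeLowerBound) {
        List<String> large = new ArrayList<>();
        for (Map.Entry<String, Long> e : memstoreSizes.entrySet()) {
            if (e.getValue() >= flushSizeLowerBound) {
                large.add(e.getKey());
            }
        }
        if (large.isEmpty()) {
            // "Since none of the CFs were above the size, flushing all."
            return new ArrayList<>(memstoreSizes.keySet());
        }
        return large;
    }

    public static void main(String[] args) {
        // Sizes loosely modelled on the meta flush logged below (info/ns/table edits).
        Map<String, Long> sizes = new LinkedHashMap<>();
        sizes.put("info", 5_269L);
        sizes.put("ns", 74L);
        sizes.put("table", 122L);
        sizes.put("rep_barrier", 0L);
        System.out.println(familiesToFlush(sizes, 16 * 1024L)); // none is large -> all four
    }
}
```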
2024-11-24T03:49:54,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-24T03:49:54,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae871af58eaac429b1e8eee60d9d32a1, server=71d8d2d6408d,46657,1732420170225 because future has completed 2024-11-24T03:49:54,394 INFO [StoreOpener-3f0ba190cb5e13cac46a5e798061202a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f0ba190cb5e13cac46a5e798061202a columnFamilyName info 2024-11-24T03:49:54,394 DEBUG [StoreOpener-3f0ba190cb5e13cac46a5e798061202a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:49:54,398 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-24T03:49:54,398 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure ae871af58eaac429b1e8eee60d9d32a1, server=71d8d2d6408d,46657,1732420170225 in 185 msec 2024-11-24T03:49:54,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ae871af58eaac429b1e8eee60d9d32a1, ASSIGN in 346 msec 2024-11-24T03:49:54,405 DEBUG [StoreOpener-3f0ba190cb5e13cac46a5e798061202a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7->hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/d41158420ada4abda01a373cc43607fe-bottom 2024-11-24T03:49:54,406 INFO [StoreOpener-3f0ba190cb5e13cac46a5e798061202a-1 {}] regionserver.HStore(327): Store=3f0ba190cb5e13cac46a5e798061202a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:49:54,406 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,407 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,409 
DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,409 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae871af58eaac429b1e8eee60d9d32a1#info#compaction#61 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:49:54,409 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,409 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,410 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e8ad2fec95544cf09a8d6602ee768e31 is 1080, key is row0062/info:/1732420183604/Put/seqid=0 2024-11-24T03:49:54,411 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,414 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 3f0ba190cb5e13cac46a5e798061202a; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769829, jitterRate=-0.02111184597015381}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T03:49:54,414 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:49:54,414 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 3f0ba190cb5e13cac46a5e798061202a: Running coprocessor pre-open hook at 1732420194389Writing region info on filesystem at 1732420194389Initializing all the Stores at 1732420194390 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420194390Cleaning up temporary data from old regions at 1732420194409 (+19 ms)Running coprocessor post-open hooks at 1732420194414 (+5 ms)Region opened successfully at 1732420194414 2024-11-24T03:49:54,415 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a., pid=13, masterSystemTime=1732420194362 2024-11-24T03:49:54,415 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 
{event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 3f0ba190cb5e13cac46a5e798061202a:info, priority=-2147483648, current under compaction store size is 2 2024-11-24T03:49:54,415 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-24T03:49:54,415 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-24T03:49:54,416 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 2024-11-24T03:49:54,416 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1541): 3f0ba190cb5e13cac46a5e798061202a/info is initiating minor compaction (all files) 2024-11-24T03:49:54,416 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3f0ba190cb5e13cac46a5e798061202a/info in TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 2024-11-24T03:49:54,417 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7->hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/d41158420ada4abda01a373cc43607fe-bottom] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/.tmp, totalSize=72.6 K 2024-11-24T03:49:54,418 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.Compactor(225): Compacting d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732420181448 2024-11-24T03:49:54,420 DEBUG [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 2024-11-24T03:49:54,421 INFO [RS_OPEN_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 
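Throughout this section, flushes and compactions first write their output under the region's .tmp directory and only then "commit" it by moving it into the store directory ("Committing .../.tmp/info/<file> as .../info/<file>"), so readers never observe a partially written HFile. Below is a minimal write-to-temp-then-move sketch using java.nio.file; it is a local-filesystem simplification of what the region's file-system layer does on HDFS.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Hypothetical sketch: write the new file into <store>/.tmp and commit it with a single
// rename, mirroring the "Committing ... as ..." entries in the log.
public class TmpThenCommit {

    static Path commit(Path storeDir, String fileName, byte[] contents) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, contents); // the slow write happens out of readers' sight

        Path finalFile = storeDir.resolve(fileName);
        // A rename within one filesystem is effectively atomic; readers see old or new, never half.
        Files.move(tmpFile, finalFile, StandardCopyOption.ATOMIC_MOVE);
        return finalFile;
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("info-store");
        Path committed = commit(storeDir, "e8ad2fec95544cf09a8d6602ee768e31",
            "fake hfile bytes".getBytes(StandardCharsets.UTF_8));
        System.out.println("committed " + committed);
    }
}
```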
2024-11-24T03:49:54,421 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3f0ba190cb5e13cac46a5e798061202a, regionState=OPEN, openSeqNum=86, regionLocation=71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:54,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3f0ba190cb5e13cac46a5e798061202a, server=71d8d2d6408d,46657,1732420170225 because future has completed 2024-11-24T03:49:54,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/info/ec45896ca1e14c9f9c8a31ad83f9cda2 is 193, key is TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1./info:regioninfo/1732420194390/Put/seqid=0 2024-11-24T03:49:54,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741848_1024 (size=8260) 2024-11-24T03:49:54,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741848_1024 (size=8260) 2024-11-24T03:49:54,434 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-24T03:49:54,434 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 3f0ba190cb5e13cac46a5e798061202a, server=71d8d2d6408d,46657,1732420170225 in 219 msec 2024-11-24T03:49:54,438 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-24T03:49:54,438 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f0ba190cb5e13cac46a5e798061202a, ASSIGN in 382 msec 2024-11-24T03:49:54,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741849_1025 (size=9882) 2024-11-24T03:49:54,440 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e8ad2fec95544cf09a8d6602ee768e31 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8ad2fec95544cf09a8d6602ee768e31 2024-11-24T03:49:54,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741849_1025 (size=9882) 2024-11-24T03:49:54,441 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=18651f7951b24351b6deb47b7480bab7, daughterA=3f0ba190cb5e13cac46a5e798061202a, daughterB=ae871af58eaac429b1e8eee60d9d32a1 in 695 msec 2024-11-24T03:49:54,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/info/ec45896ca1e14c9f9c8a31ad83f9cda2 2024-11-24T03:49:54,447 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in ae871af58eaac429b1e8eee60d9d32a1/info of ae871af58eaac429b1e8eee60d9d32a1 into e8ad2fec95544cf09a8d6602ee768e31(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:49:54,448 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:49:54,448 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., storeName=ae871af58eaac429b1e8eee60d9d32a1/info, priority=15, startTime=1732420194386; duration=0sec 2024-11-24T03:49:54,448 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:49:54,448 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae871af58eaac429b1e8eee60d9d32a1:info 2024-11-24T03:49:54,449 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f0ba190cb5e13cac46a5e798061202a#info#compaction#63 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:49:54,449 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/.tmp/info/1320aeabb87e45bb8e8615106846cbbf is 1080, key is row0001/info:/1732420181448/Put/seqid=0 2024-11-24T03:49:54,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/ns/fbccdb371dd74a188dfa6362a456cc12 is 43, key is default/ns:d/1732420171287/Put/seqid=0 2024-11-24T03:49:54,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741850_1026 (size=70862) 2024-11-24T03:49:54,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741850_1026 (size=70862) 2024-11-24T03:49:54,475 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/.tmp/info/1320aeabb87e45bb8e8615106846cbbf as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/info/1320aeabb87e45bb8e8615106846cbbf 2024-11-24T03:49:54,484 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 3f0ba190cb5e13cac46a5e798061202a/info of 
3f0ba190cb5e13cac46a5e798061202a into 1320aeabb87e45bb8e8615106846cbbf(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:49:54,484 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3f0ba190cb5e13cac46a5e798061202a: 2024-11-24T03:49:54,484 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a., storeName=3f0ba190cb5e13cac46a5e798061202a/info, priority=15, startTime=1732420194415; duration=0sec 2024-11-24T03:49:54,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741851_1027 (size=5153) 2024-11-24T03:49:54,484 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:49:54,484 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f0ba190cb5e13cac46a5e798061202a:info 2024-11-24T03:49:54,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741851_1027 (size=5153) 2024-11-24T03:49:54,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/ns/fbccdb371dd74a188dfa6362a456cc12 2024-11-24T03:49:54,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/table/e6e19c43cee9480e8ffdf77d00b74ce9 is 65, key is TestLogRolling-testLogRolling/table:state/1732420171723/Put/seqid=0 2024-11-24T03:49:54,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741852_1028 (size=5340) 2024-11-24T03:49:54,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741852_1028 (size=5340) 2024-11-24T03:49:54,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/table/e6e19c43cee9480e8ffdf77d00b74ce9 2024-11-24T03:49:54,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/info/ec45896ca1e14c9f9c8a31ad83f9cda2 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/info/ec45896ca1e14c9f9c8a31ad83f9cda2 2024-11-24T03:49:54,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/info/ec45896ca1e14c9f9c8a31ad83f9cda2, entries=30, sequenceid=17, filesize=9.7 K 2024-11-24T03:49:54,527 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/ns/fbccdb371dd74a188dfa6362a456cc12 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/ns/fbccdb371dd74a188dfa6362a456cc12 2024-11-24T03:49:54,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/ns/fbccdb371dd74a188dfa6362a456cc12, entries=2, sequenceid=17, filesize=5.0 K 2024-11-24T03:49:54,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/table/e6e19c43cee9480e8ffdf77d00b74ce9 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/table/e6e19c43cee9480e8ffdf77d00b74ce9 2024-11-24T03:49:54,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/table/e6e19c43cee9480e8ffdf77d00b74ce9, entries=2, sequenceid=17, filesize=5.2 K 2024-11-24T03:49:54,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 148ms, sequenceid=17, compaction requested=false 2024-11-24T03:49:54,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T03:49:55,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
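
The stretch above records a memstore flush of hbase:meta (its info, ns and table families) and the completion of single-file compactions on the two daughters of the recent split. Purely for orientation, here is a minimal client-side sketch of driving the same flush and compaction code paths through the public Admin API; the class name, connection setup and the idea of invoking it by hand are assumptions, only the table name comes from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative only: requests the same flush + compaction work that the test
    // triggers implicitly while it writes data.
    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
          admin.flush(tn);        // write each store's memstore out as a new HFile
          admin.majorCompact(tn); // schedule a compaction that rewrites the store files
        }
      }
    }

Note that majorCompact only schedules the work; its completion is what surfaces as the HStore "Completed compaction" lines seen here.
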
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:55,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:55,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46657 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:55264 deadline: 1732420205674, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. 
is not online on 71d8d2d6408d,46657,1732420170225 2024-11-24T03:49:55,675 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., hostname=71d8d2d6408d,46657,1732420170225, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., hostname=71d8d2d6408d,46657,1732420170225, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. is not online on 71d8d2d6408d,46657,1732420170225 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T03:49:55,675 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., hostname=71d8d2d6408d,46657,1732420170225, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7. is not online on 71d8d2d6408d,46657,1732420170225 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T03:49:55,675 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732420171359.18651f7951b24351b6deb47b7480bab7., hostname=71d8d2d6408d,46657,1732420170225, seqNum=2 from cache 2024-11-24T03:49:56,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
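
The Mutate above failed with NotServingRegionException because it was routed to the split parent (18651f7951...), which is no longer online; the async locator then drops the stale location from its cache and re-resolves the row. Application code does not normally have to handle this itself. As a rough sketch only (the qualifier and value are made up, the table name and row key come from the log), the ordinary blocking client hides the same retry:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAfterSplitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          Put put = new Put(Bytes.toBytes("row0065"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          // On NotServingRegionException the client invalidates the cached region
          // location, re-reads hbase:meta and resends the mutation to the daughter region.
          table.put(put);
        }
      }
    }
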
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:56,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:57,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:57,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
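
The recurring "Failed invocation" WARNs come from RecoverLeaseFSUtils probing isFileClosed() reflectively about once per second while an old WAL writer is being closed; every probe dies in DFSClient.checkOpen because that client has already been shut down. As a rough illustration of the polling pattern only (this is not the HBase implementation; the NameNode address is the one in the log, while the path placeholder, timeout and class name are made up), lease recovery over the public DistributedFileSystem API looks roughly like this:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease, then poll until the file is reported closed.
      static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        boolean closed = dfs.recoverLease(wal);
        while (!closed && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000L);               // roughly the one-second cadence of the WARNs above
          closed = dfs.isFileClosed(wal);    // the probe that fails once the client is closed
        }
        return closed;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:44445"), conf)) {
          Path wal = new Path("/path/to/some.wal");  // placeholder; the real WAL paths appear in the log
          System.out.println("recovered=" + recover(dfs, wal, 60_000L));
        }
      }
    }
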
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:58,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:58,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:58,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:58,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:59,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
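
The root cause line, java.io.IOException: Filesystem closed, is the classic symptom of Hadoop's FileSystem cache: FileSystem.get() hands every caller with the same URI and user the very same object, so one component closing it breaks everyone else's reference. Here the WAL paths point at the earlier mini cluster on port 44445, whose client appears to have been shut down already, which is why the probes can never succeed. A small sketch of the failure mode and of the uncached alternative (the NameNode URI is taken from the log, everything else is illustrative):

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FsCacheSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI nn = URI.create("hdfs://localhost:44445");

        FileSystem a = FileSystem.get(nn, conf);  // cached instance
        FileSystem b = FileSystem.get(nn, conf);  // same object as 'a' (same URI, same user)
        a.close();                                // closes the shared client
        try {
          b.exists(new Path("/"));                // 'b' is the same, now closed, object
        } catch (IOException e) {
          System.out.println(e.getMessage());     // "Filesystem closed"
        }

        // A private, uncached instance is not affected by other components closing theirs.
        try (FileSystem own = FileSystem.newInstance(nn, conf)) {
          System.out.println(own.exists(new Path("/")));
        }
      }
    }
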
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:49:59,481 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T03:49:59,483 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,483 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,483 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,483 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,485 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,485 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,516 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,521 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,521 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:49:59,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T03:50:00,072 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T03:50:00,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:00,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:01,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:01,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:02,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:02,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:03,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:03,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:04,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:04,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:05,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:05,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:05,729 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., hostname=71d8d2d6408d,46657,1732420170225, seqNum=86] 2024-11-24T03:50:05,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:05,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:50:05,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/1f95873a356c4d17897c97ea11a41f90 is 1080, key is row0065/info:/1732420205730/Put/seqid=0 2024-11-24T03:50:05,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741853_1029 (size=12509) 2024-11-24T03:50:05,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741853_1029 (size=12509) 2024-11-24T03:50:05,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/1f95873a356c4d17897c97ea11a41f90 2024-11-24T03:50:05,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/1f95873a356c4d17897c97ea11a41f90 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1f95873a356c4d17897c97ea11a41f90 2024-11-24T03:50:05,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1f95873a356c4d17897c97ea11a41f90, entries=7, sequenceid=96, filesize=12.2 K 2024-11-24T03:50:05,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for ae871af58eaac429b1e8eee60d9d32a1 in 49ms, sequenceid=96, compaction requested=false 2024-11-24T03:50:05,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:05,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T03:50:05,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/4face96583f14011a43ff4e80c054054 is 1080, key is row0072/info:/1732420205755/Put/seqid=0 2024-11-24T03:50:05,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741854_1030 (size=16817) 2024-11-24T03:50:05,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741854_1030 (size=16817) 2024-11-24T03:50:05,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/4face96583f14011a43ff4e80c054054 2024-11-24T03:50:05,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/4face96583f14011a43ff4e80c054054 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/4face96583f14011a43ff4e80c054054 2024-11-24T03:50:05,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/4face96583f14011a43ff4e80c054054, entries=11, sequenceid=110, filesize=16.4 K 2024-11-24T03:50:05,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for ae871af58eaac429b1e8eee60d9d32a1 in 42ms, sequenceid=110, compaction requested=true 2024-11-24T03:50:05,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:05,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae871af58eaac429b1e8eee60d9d32a1:info, priority=-2147483648, current under compaction store size is 1 
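
The flush sequence above shows the MemStore for ae871af58eaac429b1e8eee60d9d32a1 being written to a new HFile under .tmp and then committed into the info store, after which the server decides whether a compaction is needed. As a rough illustration only (assuming a reachable cluster configuration on the classpath and the TestLogRolling-testLogRolling table from this log), the same flush path can be driven explicitly from a client:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml
            TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(tn);
                 Admin admin = conn.getAdmin()) {
                // Write a few ~1 KB cells, mirroring the row0065... puts seen in the log.
                for (int i = 65; i < 72; i++) {
                    Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
                    put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]);
                    table.put(put);
                }
                // Ask the region servers to flush the table's MemStores to HFiles now,
                // instead of waiting for the flush size threshold to be crossed.
                admin.flush(tn);
            }
        }
    }

Note that admin.flush(tn) requests a flush of every region of the table; the flushes in the log are instead the server-side effect of the region crossing its flush size threshold during the test's writes.
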
2024-11-24T03:50:05,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-24T03:50:05,846 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:50:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:05,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-24T03:50:05,848 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37586 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:50:05,848 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1541): ae871af58eaac429b1e8eee60d9d32a1/info is initiating minor compaction (all files) 2024-11-24T03:50:05,848 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae871af58eaac429b1e8eee60d9d32a1/info in TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:50:05,848 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8ad2fec95544cf09a8d6602ee768e31, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1f95873a356c4d17897c97ea11a41f90, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/4face96583f14011a43ff4e80c054054] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp, totalSize=36.7 K 2024-11-24T03:50:05,849 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.Compactor(225): Compacting e8ad2fec95544cf09a8d6602ee768e31, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732420183604 2024-11-24T03:50:05,850 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.Compactor(225): Compacting 1f95873a356c4d17897c97ea11a41f90, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732420205730 2024-11-24T03:50:05,850 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.Compactor(225): Compacting 4face96583f14011a43ff4e80c054054, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732420205755 2024-11-24T03:50:05,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/5a863ee02dd8423880a0634eecd50816 is 1080, key is row0083/info:/1732420205805/Put/seqid=0 
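
The ExploringCompactionPolicy lines above record how three store files of roughly 8.1 K, 12.2 K and 16.4 K (37586 bytes in total) were accepted for a minor compaction. The core eligibility test is a ratio check: no file in the candidate set may be larger than the combined size of the other files multiplied by hbase.hstore.compaction.ratio (1.2 by default). The standalone sketch below, with illustrative names, shows only that check, not the policy's actual permutation search:

    import java.util.List;

    public class CompactionRatioSketch {
        /**
         * Returns true if every file is no bigger than (sum of the other files) * ratio.
         * This mirrors the eligibility test that ratio-based compaction policies apply
         * to a candidate selection before comparing permutations.
         */
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes (bytes) comparable to the 8.1 K + 12.2 K + 16.4 K selection in the log.
            List<Long> candidate = List.of(8_260L, 12_509L, 16_817L);
            System.out.println(filesInRatio(candidate, 1.2));   // true: all three fit the ratio
        }
    }
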
2024-11-24T03:50:05,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741855_1031 (size=18987) 2024-11-24T03:50:05,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741855_1031 (size=18987) 2024-11-24T03:50:05,858 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/5a863ee02dd8423880a0634eecd50816 2024-11-24T03:50:05,865 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae871af58eaac429b1e8eee60d9d32a1#info#compaction#69 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:50:05,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/5a863ee02dd8423880a0634eecd50816 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/5a863ee02dd8423880a0634eecd50816 2024-11-24T03:50:05,865 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/a3ee23df246949f0887408962da87445 is 1080, key is row0062/info:/1732420183604/Put/seqid=0 2024-11-24T03:50:05,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/5a863ee02dd8423880a0634eecd50816, entries=13, sequenceid=126, filesize=18.5 K 2024-11-24T03:50:05,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741856_1032 (size=27778) 2024-11-24T03:50:05,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741856_1032 (size=27778) 2024-11-24T03:50:05,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for ae871af58eaac429b1e8eee60d9d32a1 in 27ms, sequenceid=126, compaction requested=false 2024-11-24T03:50:05,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:05,879 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/a3ee23df246949f0887408962da87445 as 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a3ee23df246949f0887408962da87445 2024-11-24T03:50:05,885 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ae871af58eaac429b1e8eee60d9d32a1/info of ae871af58eaac429b1e8eee60d9d32a1 into a3ee23df246949f0887408962da87445(size=27.1 K), total size for store is 45.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:50:05,885 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:05,885 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., storeName=ae871af58eaac429b1e8eee60d9d32a1/info, priority=13, startTime=1732420205846; duration=0sec 2024-11-24T03:50:05,886 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:05,886 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae871af58eaac429b1e8eee60d9d32a1:info 2024-11-24T03:50:06,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:06,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:07,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:07,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:07,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:50:07,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/809e147b16ee40e6ae2ea4f5f878c128 is 1080, key is row0096/info:/1732420205847/Put/seqid=0 2024-11-24T03:50:07,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741857_1033 (size=12516) 2024-11-24T03:50:07,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741857_1033 (size=12516) 2024-11-24T03:50:07,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/809e147b16ee40e6ae2ea4f5f878c128 2024-11-24T03:50:07,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/809e147b16ee40e6ae2ea4f5f878c128 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/809e147b16ee40e6ae2ea4f5f878c128 2024-11-24T03:50:07,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/809e147b16ee40e6ae2ea4f5f878c128, entries=7, sequenceid=137, filesize=12.2 K 2024-11-24T03:50:07,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8608 for ae871af58eaac429b1e8eee60d9d32a1 in 22ms, sequenceid=137, compaction requested=true 2024-11-24T03:50:07,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:07,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae871af58eaac429b1e8eee60d9d32a1:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:50:07,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:07,887 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:50:07,888 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 59281 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:50:07,888 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1541): ae871af58eaac429b1e8eee60d9d32a1/info is initiating minor compaction (all files) 2024-11-24T03:50:07,888 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae871af58eaac429b1e8eee60d9d32a1/info in TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:50:07,888 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a3ee23df246949f0887408962da87445, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/5a863ee02dd8423880a0634eecd50816, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/809e147b16ee40e6ae2ea4f5f878c128] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp, totalSize=57.9 K 2024-11-24T03:50:07,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:07,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-24T03:50:07,889 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting a3ee23df246949f0887408962da87445, keycount=21, bloomtype=ROW, size=27.1 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732420183604 2024-11-24T03:50:07,889 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a863ee02dd8423880a0634eecd50816, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732420205805 2024-11-24T03:50:07,890 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 809e147b16ee40e6ae2ea4f5f878c128, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732420205847 2024-11-24T03:50:07,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/1dc967f9c0894e398ec3f534487450be is 1080, key is row0103/info:/1732420207865/Put/seqid=0 2024-11-24T03:50:07,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to 
blk_1073741858_1034 (size=15750) 2024-11-24T03:50:07,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741858_1034 (size=15750) 2024-11-24T03:50:07,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=150 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/1dc967f9c0894e398ec3f534487450be 2024-11-24T03:50:07,908 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae871af58eaac429b1e8eee60d9d32a1#info#compaction#72 average throughput is 21.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:50:07,909 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/c495ab73cc224f54b0548641f27ddb17 is 1080, key is row0062/info:/1732420183604/Put/seqid=0 2024-11-24T03:50:07,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/1dc967f9c0894e398ec3f534487450be as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1dc967f9c0894e398ec3f534487450be 2024-11-24T03:50:07,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1dc967f9c0894e398ec3f534487450be, entries=10, sequenceid=150, filesize=15.4 K 2024-11-24T03:50:07,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=13.66 KB/13988 for ae871af58eaac429b1e8eee60d9d32a1 in 36ms, sequenceid=150, compaction requested=false 2024-11-24T03:50:07,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:07,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:07,926 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-24T03:50:07,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741859_1035 (size=49463) 2024-11-24T03:50:07,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741859_1035 (size=49463) 2024-11-24T03:50:07,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/ed9b6b32b429468d9e8d4f6c71f824a6 is 1080, key is row0113/info:/1732420207890/Put/seqid=0 2024-11-24T03:50:07,935 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/c495ab73cc224f54b0548641f27ddb17 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/c495ab73cc224f54b0548641f27ddb17 2024-11-24T03:50:07,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741860_1036 (size=20078) 2024-11-24T03:50:07,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/ed9b6b32b429468d9e8d4f6c71f824a6 2024-11-24T03:50:07,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741860_1036 (size=20078) 2024-11-24T03:50:07,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/ed9b6b32b429468d9e8d4f6c71f824a6 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/ed9b6b32b429468d9e8d4f6c71f824a6 2024-11-24T03:50:07,946 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ae871af58eaac429b1e8eee60d9d32a1/info of ae871af58eaac429b1e8eee60d9d32a1 into c495ab73cc224f54b0548641f27ddb17(size=48.3 K), total size for store is 63.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
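
Both the flusher and the compactor above first write their output under the region's .tmp directory and only then "commit" it by moving the finished file into the store directory, so readers never observe a partially written HFile. A minimal sketch of that publish step using the Hadoop FileSystem API follows; the paths are copied from the log, and the additional validation that HRegionFileSystem performs around the move is omitted:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitTmpFileSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:39181");   // NameNode from the log

            Path tmp = new Path("/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/"
                + "data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/"
                + ".tmp/info/ed9b6b32b429468d9e8d4f6c71f824a6");
            Path dst = new Path("/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/"
                + "data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/"
                + "info/ed9b6b32b429468d9e8d4f6c71f824a6");

            try (FileSystem fs = FileSystem.get(conf)) {
                // The finished HFile only becomes visible to the store once this rename
                // succeeds; within a single HDFS filesystem the rename is atomic.
                if (!fs.rename(tmp, dst)) {
                    throw new java.io.IOException("Failed to commit " + tmp + " to " + dst);
                }
            }
        }
    }
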
2024-11-24T03:50:07,946 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:07,946 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., storeName=ae871af58eaac429b1e8eee60d9d32a1/info, priority=13, startTime=1732420207886; duration=0sec 2024-11-24T03:50:07,946 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:07,946 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae871af58eaac429b1e8eee60d9d32a1:info 2024-11-24T03:50:07,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/ed9b6b32b429468d9e8d4f6c71f824a6, entries=14, sequenceid=167, filesize=19.6 K 2024-11-24T03:50:07,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=2.10 KB/2152 for ae871af58eaac429b1e8eee60d9d32a1 in 27ms, sequenceid=167, compaction requested=true 2024-11-24T03:50:07,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:07,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae871af58eaac429b1e8eee60d9d32a1:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:50:07,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:07,954 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:50:07,955 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85291 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:50:07,955 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1541): ae871af58eaac429b1e8eee60d9d32a1/info is initiating minor compaction (all files) 2024-11-24T03:50:07,955 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae871af58eaac429b1e8eee60d9d32a1/info in TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 
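
The PressureAwareThroughputController entries (for example compaction#72 above, averaging 21.04 MB/second against a 50.00 MB/second limit) throttle compaction output by making the compactor sleep whenever it runs ahead of the allowed rate. The sketch below shows that throttling idea in isolation with a fixed limit; the class and method names are illustrative, and HBase's controller additionally adjusts the limit based on store pressure:

    public class FixedThroughputLimiter {
        private final double bytesPerSecondLimit;
        private final long startNanos = System.nanoTime();
        private long bytesWritten = 0;

        public FixedThroughputLimiter(double bytesPerSecondLimit) {
            this.bytesPerSecondLimit = bytesPerSecondLimit;
        }

        /** Call after writing `bytes`; sleeps just long enough to keep the average rate under the limit. */
        public void control(long bytes) throws InterruptedException {
            bytesWritten += bytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double minSecondsForBytes = bytesWritten / bytesPerSecondLimit;
            if (minSecondsForBytes > elapsedSec) {
                // Writing faster than the allowance: sleep off the difference.
                Thread.sleep((long) ((minSecondsForBytes - elapsedSec) * 1000));
            }
        }

        public static void main(String[] args) throws InterruptedException {
            FixedThroughputLimiter limiter = new FixedThroughputLimiter(50 * 1024 * 1024); // 50 MB/s
            for (int i = 0; i < 100; i++) {
                // Pretend each iteration wrote a 1 MB block of compacted output.
                limiter.control(1024 * 1024);
            }
        }
    }
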
2024-11-24T03:50:07,955 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/c495ab73cc224f54b0548641f27ddb17, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1dc967f9c0894e398ec3f534487450be, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/ed9b6b32b429468d9e8d4f6c71f824a6] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp, totalSize=83.3 K 2024-11-24T03:50:07,956 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting c495ab73cc224f54b0548641f27ddb17, keycount=41, bloomtype=ROW, size=48.3 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732420183604 2024-11-24T03:50:07,956 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1dc967f9c0894e398ec3f534487450be, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1732420207865 2024-11-24T03:50:07,957 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed9b6b32b429468d9e8d4f6c71f824a6, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732420207890 2024-11-24T03:50:07,968 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae871af58eaac429b1e8eee60d9d32a1#info#compaction#74 average throughput is 66.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:50:07,969 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/51fb563d797a4cfabe04fd7d45ef9271 is 1080, key is row0062/info:/1732420183604/Put/seqid=0 2024-11-24T03:50:07,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741861_1037 (size=75578) 2024-11-24T03:50:07,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741861_1037 (size=75578) 2024-11-24T03:50:07,982 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/51fb563d797a4cfabe04fd7d45ef9271 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/51fb563d797a4cfabe04fd7d45ef9271 2024-11-24T03:50:07,990 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ae871af58eaac429b1e8eee60d9d32a1/info of ae871af58eaac429b1e8eee60d9d32a1 into 51fb563d797a4cfabe04fd7d45ef9271(size=73.8 K), total size for store is 73.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:50:07,990 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:07,990 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., storeName=ae871af58eaac429b1e8eee60d9d32a1/info, priority=13, startTime=1732420207954; duration=0sec 2024-11-24T03:50:07,990 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:07,990 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae871af58eaac429b1e8eee60d9d32a1:info 2024-11-24T03:50:08,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:08,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:09,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:09,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:09,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:09,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:50:09,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/686e29d3047441d18d7818c4aedd6067 is 1080, key is row0127/info:/1732420207928/Put/seqid=0 2024-11-24T03:50:09,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741862_1038 (size=12516) 2024-11-24T03:50:09,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741862_1038 (size=12516) 2024-11-24T03:50:09,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ae871af58eaac429b1e8eee60d9d32a1, server=71d8d2d6408d,46657,1732420170225 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-24T03:50:09,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:55264 deadline: 1732420219981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ae871af58eaac429b1e8eee60d9d32a1, server=71d8d2d6408d,46657,1732420170225 2024-11-24T03:50:09,983 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., hostname=71d8d2d6408d,46657,1732420170225, seqNum=86 , the old value is region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., hostname=71d8d2d6408d,46657,1732420170225, seqNum=86, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ae871af58eaac429b1e8eee60d9d32a1, server=71d8d2d6408d,46657,1732420170225 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T03:50:09,983 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., hostname=71d8d2d6408d,46657,1732420170225, seqNum=86 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ae871af58eaac429b1e8eee60d9d32a1, server=71d8d2d6408d,46657,1732420170225 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T03:50:09,983 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., hostname=71d8d2d6408d,46657,1732420170225, seqNum=86 because the exception is null or not the one we care about 2024-11-24T03:50:10,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:10,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:10,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/686e29d3047441d18d7818c4aedd6067 2024-11-24T03:50:10,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/686e29d3047441d18d7818c4aedd6067 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/686e29d3047441d18d7818c4aedd6067 2024-11-24T03:50:10,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/686e29d3047441d18d7818c4aedd6067, entries=7, sequenceid=179, filesize=12.2 K 2024-11-24T03:50:10,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for ae871af58eaac429b1e8eee60d9d32a1 in 422ms, sequenceid=179, compaction requested=false 2024-11-24T03:50:10,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:11,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:11,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:11,304 INFO [master/71d8d2d6408d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T03:50:11,304 INFO [master/71d8d2d6408d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T03:50:12,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:12,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:13,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:13,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:14,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:14,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:15,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:15,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:16,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:16,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:16,210 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-24T03:50:17,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:17,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:18,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:18,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:19,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:19,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:20,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:20,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-24T03:50:20,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/a8f7e29dfa5d4376bf969c835c24190a is 1080, key is row0134/info:/1732420209941/Put/seqid=0 2024-11-24T03:50:20,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741863_1039 (size=29784) 2024-11-24T03:50:20,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741863_1039 (size=29784) 2024-11-24T03:50:20,053 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/a8f7e29dfa5d4376bf969c835c24190a 2024-11-24T03:50:20,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/a8f7e29dfa5d4376bf969c835c24190a as 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a8f7e29dfa5d4376bf969c835c24190a 2024-11-24T03:50:20,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a8f7e29dfa5d4376bf969c835c24190a, entries=23, sequenceid=205, filesize=29.1 K 2024-11-24T03:50:20,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for ae871af58eaac429b1e8eee60d9d32a1 in 22ms, sequenceid=205, compaction requested=true 2024-11-24T03:50:20,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:20,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae871af58eaac429b1e8eee60d9d32a1:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:50:20,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:20,064 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:50:20,065 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 117878 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:50:20,065 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1541): ae871af58eaac429b1e8eee60d9d32a1/info is initiating minor compaction (all files) 2024-11-24T03:50:20,066 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae871af58eaac429b1e8eee60d9d32a1/info in TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 
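The flush and compaction entries above follow a write-to-temporary-then-commit pattern: the new HFile is first written under the region's .tmp directory and only afterwards renamed into the visible info store directory (the "Committing .../.tmp/info/... as .../info/..." lines). Below is a minimal, illustrative sketch of that pattern against the public Hadoop FileSystem API; the class name, paths, and file contents are hypothetical, and this is not the actual HRegionFileSystem implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

// Illustrative sketch only: write a store file into a ".tmp" area first, then
// publish it by renaming it into the store directory, mirroring the
// "Committing .../.tmp/info/... as .../info/..." log lines above.
// All paths and names are hypothetical.
public class TmpThenCommitSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf); // local FS by default; HDFS if so configured

    Path tmpFile   = new Path("/example-region/.tmp/info/example-hfile");
    Path finalFile = new Path("/example-region/info/example-hfile");

    // 1. Write the flushed data under .tmp, where readers of the store directory never look.
    try (FSDataOutputStream out = fs.create(tmpFile)) {
      out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Commit: rename into the store directory so readers only ever observe a complete file.
    fs.mkdirs(finalFile.getParent());
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
  }
}

On HDFS the rename is a metadata-only operation on the NameNode, which is why the committed store file becomes visible to scanners all at once rather than byte by byte.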
2024-11-24T03:50:20,066 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/51fb563d797a4cfabe04fd7d45ef9271, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/686e29d3047441d18d7818c4aedd6067, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a8f7e29dfa5d4376bf969c835c24190a] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp, totalSize=115.1 K 2024-11-24T03:50:20,066 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 51fb563d797a4cfabe04fd7d45ef9271, keycount=65, bloomtype=ROW, size=73.8 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732420183604 2024-11-24T03:50:20,066 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 686e29d3047441d18d7818c4aedd6067, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1732420207928 2024-11-24T03:50:20,067 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting a8f7e29dfa5d4376bf969c835c24190a, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732420209941 2024-11-24T03:50:20,078 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae871af58eaac429b1e8eee60d9d32a1#info#compaction#77 average throughput is 32.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:50:20,079 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/d3f901a256cf48b683a1261c9536fb0f is 1080, key is row0062/info:/1732420183604/Put/seqid=0 2024-11-24T03:50:20,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741864_1040 (size=108028) 2024-11-24T03:50:20,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741864_1040 (size=108028) 2024-11-24T03:50:20,088 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/d3f901a256cf48b683a1261c9536fb0f as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d3f901a256cf48b683a1261c9536fb0f 2024-11-24T03:50:20,094 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ae871af58eaac429b1e8eee60d9d32a1/info of ae871af58eaac429b1e8eee60d9d32a1 into d3f901a256cf48b683a1261c9536fb0f(size=105.5 K), total size for store is 105.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:50:20,094 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:20,094 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., storeName=ae871af58eaac429b1e8eee60d9d32a1/info, priority=13, startTime=1732420220064; duration=0sec 2024-11-24T03:50:20,094 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:20,094 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae871af58eaac429b1e8eee60d9d32a1:info 2024-11-24T03:50:20,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:20,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:21,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:21,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:22,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:22,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:50:22,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e8c41341eefb4e8ebcc41fbf72b8b98b is 1080, key is row0157/info:/1732420220044/Put/seqid=0 2024-11-24T03:50:22,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741865_1041 (size=12516) 2024-11-24T03:50:22,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741865_1041 (size=12516) 2024-11-24T03:50:22,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e8c41341eefb4e8ebcc41fbf72b8b98b 2024-11-24T03:50:22,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e8c41341eefb4e8ebcc41fbf72b8b98b as 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8c41341eefb4e8ebcc41fbf72b8b98b 2024-11-24T03:50:22,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:22,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8c41341eefb4e8ebcc41fbf72b8b98b, entries=7, sequenceid=216, filesize=12.2 K 2024-11-24T03:50:22,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for ae871af58eaac429b1e8eee60d9d32a1 in 41ms, sequenceid=216, compaction requested=false 2024-11-24T03:50:22,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:22,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:22,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-24T03:50:22,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:22,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/f5f6420e6eec4e1787310c43029224d0 is 1080, key is row0164/info:/1732420222058/Put/seqid=0 2024-11-24T03:50:22,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741866_1042 (size=26550) 2024-11-24T03:50:22,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741866_1042 (size=26550) 2024-11-24T03:50:22,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/f5f6420e6eec4e1787310c43029224d0 2024-11-24T03:50:22,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/f5f6420e6eec4e1787310c43029224d0 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/f5f6420e6eec4e1787310c43029224d0 2024-11-24T03:50:22,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/f5f6420e6eec4e1787310c43029224d0, entries=20, sequenceid=239, filesize=25.9 K 2024-11-24T03:50:22,124 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for ae871af58eaac429b1e8eee60d9d32a1 in 24ms, sequenceid=239, compaction requested=true 2024-11-24T03:50:22,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:22,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae871af58eaac429b1e8eee60d9d32a1:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:50:22,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:22,124 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:50:22,125 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 147094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:50:22,125 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1541): ae871af58eaac429b1e8eee60d9d32a1/info is initiating minor compaction (all files) 2024-11-24T03:50:22,125 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
ae871af58eaac429b1e8eee60d9d32a1/info in TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:50:22,125 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d3f901a256cf48b683a1261c9536fb0f, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8c41341eefb4e8ebcc41fbf72b8b98b, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/f5f6420e6eec4e1787310c43029224d0] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp, totalSize=143.6 K 2024-11-24T03:50:22,126 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3f901a256cf48b683a1261c9536fb0f, keycount=95, bloomtype=ROW, size=105.5 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732420183604 2024-11-24T03:50:22,126 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting e8c41341eefb4e8ebcc41fbf72b8b98b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732420220044 2024-11-24T03:50:22,126 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting f5f6420e6eec4e1787310c43029224d0, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732420222058 2024-11-24T03:50:22,138 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae871af58eaac429b1e8eee60d9d32a1#info#compaction#80 average throughput is 41.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:50:22,139 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/9367ba1bc0474ab4ad9d155b747f6e3c is 1080, key is row0062/info:/1732420183604/Put/seqid=0 2024-11-24T03:50:22,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741867_1043 (size=137372) 2024-11-24T03:50:22,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741867_1043 (size=137372) 2024-11-24T03:50:22,147 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/9367ba1bc0474ab4ad9d155b747f6e3c as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/9367ba1bc0474ab4ad9d155b747f6e3c 2024-11-24T03:50:22,154 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ae871af58eaac429b1e8eee60d9d32a1/info of ae871af58eaac429b1e8eee60d9d32a1 into 9367ba1bc0474ab4ad9d155b747f6e3c(size=134.2 K), total size for store is 134.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:50:22,154 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:22,154 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., storeName=ae871af58eaac429b1e8eee60d9d32a1/info, priority=13, startTime=1732420222124; duration=0sec 2024-11-24T03:50:22,154 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:22,154 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae871af58eaac429b1e8eee60d9d32a1:info 2024-11-24T03:50:23,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:23,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:24,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:24,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:24,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:24,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-24T03:50:24,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/99949cc7dac549a0a25bf3d0fbcfd5e1 is 1080, key is row0184/info:/1732420222102/Put/seqid=0 2024-11-24T03:50:24,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741868_1044 (size=15750) 2024-11-24T03:50:24,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741868_1044 (size=15750) 2024-11-24T03:50:24,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/99949cc7dac549a0a25bf3d0fbcfd5e1 2024-11-24T03:50:24,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/99949cc7dac549a0a25bf3d0fbcfd5e1 as 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/99949cc7dac549a0a25bf3d0fbcfd5e1 2024-11-24T03:50:24,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/99949cc7dac549a0a25bf3d0fbcfd5e1, entries=10, sequenceid=253, filesize=15.4 K 2024-11-24T03:50:24,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for ae871af58eaac429b1e8eee60d9d32a1 in 25ms, sequenceid=253, compaction requested=false 2024-11-24T03:50:24,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:24,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:24,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-24T03:50:24,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/3b6665e46e1d4757b4e41bdad46d8714 is 1080, key is row0194/info:/1732420224124/Put/seqid=0 2024-11-24T03:50:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741869_1045 (size=17917) 2024-11-24T03:50:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741869_1045 (size=17917) 2024-11-24T03:50:24,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/3b6665e46e1d4757b4e41bdad46d8714 2024-11-24T03:50:24,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/3b6665e46e1d4757b4e41bdad46d8714 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/3b6665e46e1d4757b4e41bdad46d8714 2024-11-24T03:50:24,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/3b6665e46e1d4757b4e41bdad46d8714, entries=12, sequenceid=268, filesize=17.5 K 2024-11-24T03:50:24,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=15.76 KB/16140 for ae871af58eaac429b1e8eee60d9d32a1 in 32ms, sequenceid=268, compaction requested=true 2024-11-24T03:50:24,182 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:24,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:24,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae871af58eaac429b1e8eee60d9d32a1:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:50:24,182 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:50:24,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:24,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-24T03:50:24,183 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 171039 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:50:24,183 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1541): ae871af58eaac429b1e8eee60d9d32a1/info is initiating minor compaction (all files) 2024-11-24T03:50:24,183 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae871af58eaac429b1e8eee60d9d32a1/info in TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:50:24,183 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/9367ba1bc0474ab4ad9d155b747f6e3c, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/99949cc7dac549a0a25bf3d0fbcfd5e1, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/3b6665e46e1d4757b4e41bdad46d8714] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp, totalSize=167.0 K 2024-11-24T03:50:24,184 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9367ba1bc0474ab4ad9d155b747f6e3c, keycount=122, bloomtype=ROW, size=134.2 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732420183604 2024-11-24T03:50:24,184 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 99949cc7dac549a0a25bf3d0fbcfd5e1, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732420222102 2024-11-24T03:50:24,185 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3b6665e46e1d4757b4e41bdad46d8714, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732420224124 2024-11-24T03:50:24,186 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/54cb3678cf3949cdb41e7d0eac9af5ee is 1080, key is row0206/info:/1732420224151/Put/seqid=0 2024-11-24T03:50:24,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741870_1046 (size=22254) 2024-11-24T03:50:24,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741870_1046 (size=22254) 2024-11-24T03:50:24,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/54cb3678cf3949cdb41e7d0eac9af5ee 2024-11-24T03:50:24,198 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae871af58eaac429b1e8eee60d9d32a1#info#compaction#84 average throughput is 49.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:50:24,199 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/2aac9896ea994cd097e6a9ac027d5203 is 1080, key is row0062/info:/1732420183604/Put/seqid=0 2024-11-24T03:50:24,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/54cb3678cf3949cdb41e7d0eac9af5ee as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/54cb3678cf3949cdb41e7d0eac9af5ee 2024-11-24T03:50:24,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/54cb3678cf3949cdb41e7d0eac9af5ee, entries=16, sequenceid=287, filesize=21.7 K 2024-11-24T03:50:24,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=3.15 KB/3228 for ae871af58eaac429b1e8eee60d9d32a1 in 24ms, sequenceid=287, compaction requested=false 2024-11-24T03:50:24,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:24,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741871_1047 (size=161274) 2024-11-24T03:50:24,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741871_1047 (size=161274) 2024-11-24T03:50:24,619 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/2aac9896ea994cd097e6a9ac027d5203 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/2aac9896ea994cd097e6a9ac027d5203 2024-11-24T03:50:24,625 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ae871af58eaac429b1e8eee60d9d32a1/info of ae871af58eaac429b1e8eee60d9d32a1 into 2aac9896ea994cd097e6a9ac027d5203(size=157.5 K), total size for store is 179.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:50:24,625 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:24,625 INFO [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., storeName=ae871af58eaac429b1e8eee60d9d32a1/info, priority=13, startTime=1732420224182; duration=0sec 2024-11-24T03:50:24,625 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:24,626 DEBUG [RS:0;71d8d2d6408d:46657-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae871af58eaac429b1e8eee60d9d32a1:info 2024-11-24T03:50:25,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:25,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:26,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:26,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:26,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:26,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T03:50:26,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e1c0c36540474d1bbf5688c5e31d702f is 1080, key is row0222/info:/1732420224183/Put/seqid=0 2024-11-24T03:50:26,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741872_1048 (size=12523) 2024-11-24T03:50:26,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741872_1048 (size=12523) 2024-11-24T03:50:26,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e1c0c36540474d1bbf5688c5e31d702f 2024-11-24T03:50:26,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e1c0c36540474d1bbf5688c5e31d702f as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e1c0c36540474d1bbf5688c5e31d702f 2024-11-24T03:50:26,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e1c0c36540474d1bbf5688c5e31d702f, entries=7, sequenceid=298, filesize=12.2 K 2024-11-24T03:50:26,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for ae871af58eaac429b1e8eee60d9d32a1 in 25ms, sequenceid=298, compaction requested=true 2024-11-24T03:50:26,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:26,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ae871af58eaac429b1e8eee60d9d32a1:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T03:50:26,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:26,222 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T03:50:26,223 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 196051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T03:50:26,223 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1541): ae871af58eaac429b1e8eee60d9d32a1/info is initiating minor compaction (all files) 2024-11-24T03:50:26,223 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ae871af58eaac429b1e8eee60d9d32a1/info in TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:50:26,223 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/2aac9896ea994cd097e6a9ac027d5203, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/54cb3678cf3949cdb41e7d0eac9af5ee, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e1c0c36540474d1bbf5688c5e31d702f] into tmpdir=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp, totalSize=191.5 K 2024-11-24T03:50:26,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46657 {}] regionserver.HRegion(8855): Flush requested on ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:26,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-24T03:50:26,224 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.Compactor(225): Compacting 2aac9896ea994cd097e6a9ac027d5203, keycount=144, bloomtype=ROW, size=157.5 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732420183604 2024-11-24T03:50:26,225 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.Compactor(225): Compacting 54cb3678cf3949cdb41e7d0eac9af5ee, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732420224151 2024-11-24T03:50:26,225 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] compactions.Compactor(225): Compacting e1c0c36540474d1bbf5688c5e31d702f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732420224183 2024-11-24T03:50:26,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/4b3dd3002c9743c08dd3f468fec7b568 is 1080, key is row0229/info:/1732420226199/Put/seqid=0 2024-11-24T03:50:26,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to 
blk_1073741873_1049 (size=17918) 2024-11-24T03:50:26,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741873_1049 (size=17918) 2024-11-24T03:50:26,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/4b3dd3002c9743c08dd3f468fec7b568 2024-11-24T03:50:26,255 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ae871af58eaac429b1e8eee60d9d32a1#info#compaction#87 average throughput is 42.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T03:50:26,256 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/1516fc0eacea43d4924fff00e923c997 is 1080, key is row0062/info:/1732420183604/Put/seqid=0 2024-11-24T03:50:26,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/4b3dd3002c9743c08dd3f468fec7b568 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/4b3dd3002c9743c08dd3f468fec7b568 2024-11-24T03:50:26,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741874_1050 (size=186197) 2024-11-24T03:50:26,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741874_1050 (size=186197) 2024-11-24T03:50:26,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/4b3dd3002c9743c08dd3f468fec7b568, entries=12, sequenceid=313, filesize=17.5 K 2024-11-24T03:50:26,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=16.81 KB/17216 for ae871af58eaac429b1e8eee60d9d32a1 in 43ms, sequenceid=313, compaction requested=false 2024-11-24T03:50:26,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:26,268 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/1516fc0eacea43d4924fff00e923c997 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1516fc0eacea43d4924fff00e923c997 2024-11-24T03:50:26,275 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction 
of 3 (all) file(s) in ae871af58eaac429b1e8eee60d9d32a1/info of ae871af58eaac429b1e8eee60d9d32a1 into 1516fc0eacea43d4924fff00e923c997(size=181.8 K), total size for store is 199.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T03:50:26,275 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:26,275 INFO [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., storeName=ae871af58eaac429b1e8eee60d9d32a1/info, priority=13, startTime=1732420226222; duration=0sec 2024-11-24T03:50:26,275 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T03:50:26,275 DEBUG [RS:0;71d8d2d6408d:46657-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ae871af58eaac429b1e8eee60d9d32a1:info 2024-11-24T03:50:27,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:27,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:28,064 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=70, reuseRatio=88.61% 2024-11-24T03:50:28,065 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-24T03:50:28,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:28,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:28,261 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-24T03:50:28,261 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C46657%2C1732420170225.1732420228261 2024-11-24T03:50:28,278 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,278 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,278 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,279 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,279 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,279 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225/71d8d2d6408d%2C46657%2C1732420170225.1732420170849 with entries=311, filesize=307.82 KB; new WAL /user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225/71d8d2d6408d%2C46657%2C1732420170225.1732420228261 2024-11-24T03:50:28,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741833_1009 (size=315212) 2024-11-24T03:50:28,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741833_1009 (size=315212) 2024-11-24T03:50:28,288 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38639:38639),(127.0.0.1/127.0.0.1:37621:37621)] 2024-11-24T03:50:28,291 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 3f0ba190cb5e13cac46a5e798061202a: 2024-11-24T03:50:28,291 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing ae871af58eaac429b1e8eee60d9d32a1 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-24T03:50:28,297 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e57efe440bf947cab56b9acf9fb1bb9b is 1080, key is row0241/info:/1732420226225/Put/seqid=0 2024-11-24T03:50:28,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741876_1052 (size=22254) 2024-11-24T03:50:28,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741876_1052 (size=22254) 2024-11-24T03:50:28,307 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=333 (bloomFilter=true), 
to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e57efe440bf947cab56b9acf9fb1bb9b 2024-11-24T03:50:28,312 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/.tmp/info/e57efe440bf947cab56b9acf9fb1bb9b as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e57efe440bf947cab56b9acf9fb1bb9b 2024-11-24T03:50:28,317 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e57efe440bf947cab56b9acf9fb1bb9b, entries=16, sequenceid=333, filesize=21.7 K 2024-11-24T03:50:28,318 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=0 B/0 for ae871af58eaac429b1e8eee60d9d32a1 in 27ms, sequenceid=333, compaction requested=true 2024-11-24T03:50:28,318 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for ae871af58eaac429b1e8eee60d9d32a1: 2024-11-24T03:50:28,318 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-24T03:50:28,322 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/info/5d1f923540d64f9cb6ca25f86fb7bff9 is 186, key is TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a./info:regioninfo/1732420194421/Put/seqid=0 2024-11-24T03:50:28,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741877_1053 (size=6153) 2024-11-24T03:50:28,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741877_1053 (size=6153) 2024-11-24T03:50:28,327 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/info/5d1f923540d64f9cb6ca25f86fb7bff9 2024-11-24T03:50:28,333 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/.tmp/info/5d1f923540d64f9cb6ca25f86fb7bff9 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/info/5d1f923540d64f9cb6ca25f86fb7bff9 2024-11-24T03:50:28,337 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/info/5d1f923540d64f9cb6ca25f86fb7bff9, entries=5, sequenceid=21, filesize=6.0 K 2024-11-24T03:50:28,339 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 20ms, 
sequenceid=21, compaction requested=false 2024-11-24T03:50:28,339 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T03:50:28,339 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C46657%2C1732420170225.1732420228339 2024-11-24T03:50:28,343 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,344 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,344 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,344 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,344 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,344 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225/71d8d2d6408d%2C46657%2C1732420170225.1732420228261 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225/71d8d2d6408d%2C46657%2C1732420170225.1732420228339 2024-11-24T03:50:28,345 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37621:37621),(127.0.0.1/127.0.0.1:38639:38639)] 2024-11-24T03:50:28,345 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225/71d8d2d6408d%2C46657%2C1732420170225.1732420228261 is not closed yet, will try archiving it next time 2024-11-24T03:50:28,345 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225/71d8d2d6408d%2C46657%2C1732420170225.1732420170849 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/oldWALs/71d8d2d6408d%2C46657%2C1732420170225.1732420170849 2024-11-24T03:50:28,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741875_1051 (size=731) 2024-11-24T03:50:28,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741875_1051 (size=731) 2024-11-24T03:50:28,345 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T03:50:28,346 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/WALs/71d8d2d6408d,46657,1732420170225/71d8d2d6408d%2C46657%2C1732420170225.1732420228261 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/oldWALs/71d8d2d6408d%2C46657%2C1732420170225.1732420228261 2024-11-24T03:50:28,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T03:50:28,446 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
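
The RecoverLeaseFSUtils warnings that repeat throughout this section all fail the same way: the isFileClosed check is invoked reflectively (hence the Method.invoke frames in the trace), the underlying DFSClient has already been closed, and its IOException("Filesystem closed") surfaces wrapped in an InvocationTargetException whose own message is null, which is why the log shows "InvocationTargetException: null" with the real failure on the "Caused by:" line. A minimal, self-contained illustration of that wrapping behaviour in plain Java; FakeFileSystem is a hypothetical stand-in, not an HDFS class:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveInvokeSketch {
        /** Hypothetical stand-in for a filesystem whose client is already closed. */
        public static class FakeFileSystem {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            FakeFileSystem fs = new FakeFileSystem();
            Method isFileClosed = FakeFileSystem.class.getMethod("isFileClosed", String.class);
            try {
                isFileClosed.invoke(fs, "/WALs/example.wal");
            } catch (InvocationTargetException e) {
                // Method.invoke wraps whatever the target method threw; the real
                // failure is the cause, which is what the "Caused by:" line carries.
                System.out.println("wrapper: " + e);            // prints java.lang.reflect.InvocationTargetException
                System.out.println("cause:   " + e.getCause()); // prints java.io.IOException: Filesystem closed
            }
        }
    }
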
2024-11-24T03:50:28,446 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:50:28,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:28,446 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
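
The "Call stack:" DEBUG entries above, and again in the master and region-server shutdown below, record where the connection close was initiated by capturing the current thread's stack at the call site. A small sketch of producing that kind of single-line "at ..." chain in plain Java; the helper name is hypothetical:

    public class CallStackSketch {
        // Hypothetical helper: format the current stack as one "at ..." chain,
        // similar to the "Call stack: at ..." DEBUG lines in the log.
        static String currentCallStack() {
            StringBuilder sb = new StringBuilder("Call stack:");
            StackTraceElement[] frames = Thread.currentThread().getStackTrace();
            // frames[0] is Thread.getStackTrace itself; start at 1.
            for (int i = 1; i < frames.length; i++) {
                sb.append(" at ").append(frames[i]);
            }
            return sb.toString();
        }

        static void close() {
            System.out.println("Connection has been closed. " + currentCallStack());
        }

        public static void main(String[] args) {
            close();
        }
    }
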
2024-11-24T03:50:28,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:28,447 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T03:50:28,447 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1853582582, stopped=false 2024-11-24T03:50:28,447 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=71d8d2d6408d,35951,1732420170088 2024-11-24T03:50:28,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:50:28,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:50:28,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:28,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:28,507 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:50:28,508 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T03:50:28,508 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:50:28,508 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:28,508 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:50:28,508 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '71d8d2d6408d,46657,1732420170225' ***** 2024-11-24T03:50:28,508 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T03:50:28,508 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T03:50:28,509 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(3091): Received CLOSE for 3f0ba190cb5e13cac46a5e798061202a 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(3091): Received CLOSE for ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(959): stopping server 71d8d2d6408d,46657,1732420170225 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;71d8d2d6408d:46657. 2024-11-24T03:50:28,509 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3f0ba190cb5e13cac46a5e798061202a, disabling compactions & flushes 2024-11-24T03:50:28,509 DEBUG [RS:0;71d8d2d6408d:46657 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:50:28,509 DEBUG [RS:0;71d8d2d6408d:46657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:28,509 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 2024-11-24T03:50:28,509 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T03:50:28,509 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. after waiting 0 ms 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
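
The "Waiting for Split Thread / Large Compaction Thread / Small Compaction Thread to finish..." entries describe a drain-then-stop shutdown of worker pools: stop accepting new work, wait a bounded time for in-flight tasks, then force-stop. A minimal sketch of that pattern with java.util.concurrent (illustrative only, not the CompactSplit implementation):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class GracefulShutdownSketch {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService compactionPool = Executors.newFixedThreadPool(2);
            compactionPool.submit(() -> System.out.println("compacting..."));

            // Stop accepting new work, then wait a bounded time for in-flight
            // tasks to finish -- the same idea as "Waiting for ... Thread to finish".
            compactionPool.shutdown();
            if (!compactionPool.awaitTermination(10, TimeUnit.SECONDS)) {
                // Give up waiting and interrupt whatever is still running.
                compactionPool.shutdownNow();
            }
            System.out.println("pool stopped");
        }
    }
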
2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T03:50:28,509 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T03:50:28,509 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-24T03:50:28,509 DEBUG [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(1325): Online Regions={3f0ba190cb5e13cac46a5e798061202a=TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a., ae871af58eaac429b1e8eee60d9d32a1=TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T03:50:28,509 DEBUG [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3f0ba190cb5e13cac46a5e798061202a, ae871af58eaac429b1e8eee60d9d32a1 2024-11-24T03:50:28,509 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:50:28,509 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:50:28,509 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:50:28,509 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:50:28,509 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:50:28,509 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7->hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/d41158420ada4abda01a373cc43607fe-bottom] to archive 2024-11-24T03:50:28,510 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
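
The HFileArchiver entries that follow move each obsolete store file from the region's data directory to a parallel location under archive/, preserving the table/region/family layout. A sketch of that move-to-archive step with java.nio.file, using local paths in place of the hdfs:// URLs (illustrative, not the HFileArchiver implementation):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class ArchiveMoveSketch {
        // Move a store file from .../data/<rest> to .../archive/data/<rest>,
        // keeping the relative layout intact (local filesystem only).
        static Path archive(Path rootDir, Path storeFile) throws IOException {
            Path dataDir = rootDir.resolve("data");
            Path relative = dataDir.relativize(storeFile);
            Path target = rootDir.resolve("archive").resolve("data").resolve(relative);
            Files.createDirectories(target.getParent());
            return Files.move(storeFile, target, StandardCopyOption.REPLACE_EXISTING);
        }

        public static void main(String[] args) throws IOException {
            Path root = Files.createTempDirectory("archive-sketch");
            Path storeFile = root.resolve("data/default/T/region1/info/abc123");
            Files.createDirectories(storeFile.getParent());
            Files.createFile(storeFile);
            System.out.println("archived to " + archive(root, storeFile));
        }
    }
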
2024-11-24T03:50:28,512 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7 2024-11-24T03:50:28,512 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=71d8d2d6408d:35951 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T03:50:28,512 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-24T03:50:28,513 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-24T03:50:28,514 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:50:28,514 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:50:28,514 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420228509Running coprocessor pre-close hooks at 1732420228509Disabling compacts and flushes for region at 1732420228509Disabling writes for close at 1732420228509Writing region close event to WAL at 1732420228510 (+1 ms)Running coprocessor post-close hooks at 1732420228514 (+4 ms)Closed at 1732420228514 2024-11-24T03:50:28,514 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T03:50:28,516 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/3f0ba190cb5e13cac46a5e798061202a/recovered.edits/90.seqid, newMaxSeqId=90, maxSeqId=85 2024-11-24T03:50:28,516 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 
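
Just before each region above is closed, a small marker file such as recovered.edits/24.seqid or recovered.edits/90.seqid is written so the highest flushed sequence id survives the close; the log line indicates the id is encoded in the file name. A toy version of that idea in plain Java, illustrative only and not the WALSplitUtil implementation:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.stream.Stream;

    public class SeqIdMarkerSketch {
        // Record a sequence id as an empty marker file named "<seqid>.seqid".
        static void writeMarker(Path editsDir, long seqId) throws IOException {
            Files.createDirectories(editsDir);
            Files.createFile(editsDir.resolve(seqId + ".seqid"));
        }

        // Recover the highest recorded id by scanning the directory.
        static long readMaxSeqId(Path editsDir) throws IOException {
            try (Stream<Path> files = Files.list(editsDir)) {
                return files.map(p -> p.getFileName().toString())
                            .filter(n -> n.endsWith(".seqid"))
                            .mapToLong(n -> Long.parseLong(n.substring(0, n.length() - ".seqid".length())))
                            .max()
                            .orElse(-1L);
            }
        }

        public static void main(String[] args) throws IOException {
            Path dir = Files.createTempDirectory("recovered.edits");
            writeMarker(dir, 24);
            writeMarker(dir, 90);
            System.out.println("max seqid = " + readMaxSeqId(dir)); // prints 90
        }
    }
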
2024-11-24T03:50:28,516 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3f0ba190cb5e13cac46a5e798061202a: Waiting for close lock at 1732420228509Running coprocessor pre-close hooks at 1732420228509Disabling compacts and flushes for region at 1732420228509Disabling writes for close at 1732420228509Writing region close event to WAL at 1732420228513 (+4 ms)Running coprocessor post-close hooks at 1732420228516 (+3 ms)Closed at 1732420228516 2024-11-24T03:50:28,517 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732420193743.3f0ba190cb5e13cac46a5e798061202a. 2024-11-24T03:50:28,517 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ae871af58eaac429b1e8eee60d9d32a1, disabling compactions & flushes 2024-11-24T03:50:28,517 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:50:28,517 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:50:28,517 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. after waiting 0 ms 2024-11-24T03:50:28,517 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 
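
Each "Region close journal" entry above records the same ordered steps with timestamps: wait for the close lock, run coprocessor pre-close hooks, disable compactions and flushes, disable writes, write the close event to WAL, run post-close hooks, closed. The bounded wait for the close lock can be pictured with a read/write lock where writers hold the read side and close takes the write side with a timeout; the sketch below is illustrative only, not HRegion's actual locking:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class CloseLockSketch {
        private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
        private volatile boolean writesDisabled = false;

        // Writers take the read side so many of them can proceed concurrently.
        void write(String row) {
            closeLock.readLock().lock();
            try {
                if (writesDisabled) throw new IllegalStateException("region closing");
                System.out.println("wrote " + row);
            } finally {
                closeLock.readLock().unlock();
            }
        }

        // Close takes the write side, waiting only a bounded time for writers to drain.
        boolean close(long timeoutMs) throws InterruptedException {
            long start = System.currentTimeMillis();
            if (!closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
                return false; // could not acquire the close lock in time
            }
            try {
                writesDisabled = true;
                System.out.println("Acquired close lock after waiting "
                    + (System.currentTimeMillis() - start) + " ms");
                return true;
            } finally {
                closeLock.writeLock().unlock();
            }
        }

        public static void main(String[] args) throws InterruptedException {
            CloseLockSketch region = new CloseLockSketch();
            region.write("row0001");
            System.out.println("closed=" + region.close(1000));
        }
    }
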
2024-11-24T03:50:28,517 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7->hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/18651f7951b24351b6deb47b7480bab7/info/d41158420ada4abda01a373cc43607fe-top, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8ad2fec95544cf09a8d6602ee768e31, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1f95873a356c4d17897c97ea11a41f90, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a3ee23df246949f0887408962da87445, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/4face96583f14011a43ff4e80c054054, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/5a863ee02dd8423880a0634eecd50816, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/c495ab73cc224f54b0548641f27ddb17, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/809e147b16ee40e6ae2ea4f5f878c128, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1dc967f9c0894e398ec3f534487450be, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/51fb563d797a4cfabe04fd7d45ef9271, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/ed9b6b32b429468d9e8d4f6c71f824a6, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/686e29d3047441d18d7818c4aedd6067, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d3f901a256cf48b683a1261c9536fb0f, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a8f7e29dfa5d4376bf969c835c24190a, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8c41341eefb4e8ebcc41fbf72b8b98b, 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/9367ba1bc0474ab4ad9d155b747f6e3c, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/f5f6420e6eec4e1787310c43029224d0, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/99949cc7dac549a0a25bf3d0fbcfd5e1, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/2aac9896ea994cd097e6a9ac027d5203, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/3b6665e46e1d4757b4e41bdad46d8714, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/54cb3678cf3949cdb41e7d0eac9af5ee, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e1c0c36540474d1bbf5688c5e31d702f] to archive 2024-11-24T03:50:28,518 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T03:50:28,520 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d41158420ada4abda01a373cc43607fe.18651f7951b24351b6deb47b7480bab7 2024-11-24T03:50:28,521 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8ad2fec95544cf09a8d6602ee768e31 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8ad2fec95544cf09a8d6602ee768e31 2024-11-24T03:50:28,522 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1f95873a356c4d17897c97ea11a41f90 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1f95873a356c4d17897c97ea11a41f90 2024-11-24T03:50:28,523 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a3ee23df246949f0887408962da87445 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a3ee23df246949f0887408962da87445 2024-11-24T03:50:28,524 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/4face96583f14011a43ff4e80c054054 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/4face96583f14011a43ff4e80c054054 2024-11-24T03:50:28,525 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/5a863ee02dd8423880a0634eecd50816 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/5a863ee02dd8423880a0634eecd50816 2024-11-24T03:50:28,526 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/c495ab73cc224f54b0548641f27ddb17 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/c495ab73cc224f54b0548641f27ddb17 2024-11-24T03:50:28,527 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/809e147b16ee40e6ae2ea4f5f878c128 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/809e147b16ee40e6ae2ea4f5f878c128 2024-11-24T03:50:28,528 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1dc967f9c0894e398ec3f534487450be to 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/1dc967f9c0894e398ec3f534487450be 2024-11-24T03:50:28,529 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/51fb563d797a4cfabe04fd7d45ef9271 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/51fb563d797a4cfabe04fd7d45ef9271 2024-11-24T03:50:28,530 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/ed9b6b32b429468d9e8d4f6c71f824a6 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/ed9b6b32b429468d9e8d4f6c71f824a6 2024-11-24T03:50:28,531 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/686e29d3047441d18d7818c4aedd6067 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/686e29d3047441d18d7818c4aedd6067 2024-11-24T03:50:28,533 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d3f901a256cf48b683a1261c9536fb0f to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/d3f901a256cf48b683a1261c9536fb0f 2024-11-24T03:50:28,533 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a8f7e29dfa5d4376bf969c835c24190a to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/a8f7e29dfa5d4376bf969c835c24190a 2024-11-24T03:50:28,534 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8c41341eefb4e8ebcc41fbf72b8b98b to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e8c41341eefb4e8ebcc41fbf72b8b98b 2024-11-24T03:50:28,535 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/9367ba1bc0474ab4ad9d155b747f6e3c to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/9367ba1bc0474ab4ad9d155b747f6e3c 2024-11-24T03:50:28,536 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/f5f6420e6eec4e1787310c43029224d0 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/f5f6420e6eec4e1787310c43029224d0 2024-11-24T03:50:28,537 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/99949cc7dac549a0a25bf3d0fbcfd5e1 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/99949cc7dac549a0a25bf3d0fbcfd5e1 2024-11-24T03:50:28,538 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/2aac9896ea994cd097e6a9ac027d5203 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/2aac9896ea994cd097e6a9ac027d5203 2024-11-24T03:50:28,538 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/3b6665e46e1d4757b4e41bdad46d8714 to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/3b6665e46e1d4757b4e41bdad46d8714 2024-11-24T03:50:28,539 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/54cb3678cf3949cdb41e7d0eac9af5ee to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/54cb3678cf3949cdb41e7d0eac9af5ee 2024-11-24T03:50:28,540 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e1c0c36540474d1bbf5688c5e31d702f to hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/archive/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/info/e1c0c36540474d1bbf5688c5e31d702f 2024-11-24T03:50:28,540 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e8ad2fec95544cf09a8d6602ee768e31=8260, 1f95873a356c4d17897c97ea11a41f90=12509, a3ee23df246949f0887408962da87445=27778, 4face96583f14011a43ff4e80c054054=16817, 5a863ee02dd8423880a0634eecd50816=18987, c495ab73cc224f54b0548641f27ddb17=49463, 809e147b16ee40e6ae2ea4f5f878c128=12516, 1dc967f9c0894e398ec3f534487450be=15750, 51fb563d797a4cfabe04fd7d45ef9271=75578, ed9b6b32b429468d9e8d4f6c71f824a6=20078, 686e29d3047441d18d7818c4aedd6067=12516, d3f901a256cf48b683a1261c9536fb0f=108028, a8f7e29dfa5d4376bf969c835c24190a=29784, e8c41341eefb4e8ebcc41fbf72b8b98b=12516, 9367ba1bc0474ab4ad9d155b747f6e3c=137372, f5f6420e6eec4e1787310c43029224d0=26550, 99949cc7dac549a0a25bf3d0fbcfd5e1=15750, 2aac9896ea994cd097e6a9ac027d5203=161274, 3b6665e46e1d4757b4e41bdad46d8714=17917, 54cb3678cf3949cdb41e7d0eac9af5ee=22254, e1c0c36540474d1bbf5688c5e31d702f=12523] 2024-11-24T03:50:28,543 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/data/default/TestLogRolling-testLogRolling/ae871af58eaac429b1e8eee60d9d32a1/recovered.edits/336.seqid, newMaxSeqId=336, maxSeqId=85 2024-11-24T03:50:28,544 INFO [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 2024-11-24T03:50:28,544 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ae871af58eaac429b1e8eee60d9d32a1: Waiting for close lock at 1732420228517Running coprocessor pre-close hooks at 1732420228517Disabling compacts and flushes for region at 1732420228517Disabling writes for close at 1732420228517Writing region close event to WAL at 1732420228540 (+23 ms)Running coprocessor post-close hooks at 1732420228544 (+4 ms)Closed at 1732420228544 2024-11-24T03:50:28,544 DEBUG [RS_CLOSE_REGION-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732420193743.ae871af58eaac429b1e8eee60d9d32a1. 
2024-11-24T03:50:28,710 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(976): stopping server 71d8d2d6408d,46657,1732420170225; all regions closed. 2024-11-24T03:50:28,710 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,710 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,710 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,710 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,710 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741834_1010 (size=8107) 2024-11-24T03:50:28,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741834_1010 (size=8107) 2024-11-24T03:50:28,713 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T03:50:28,713 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T03:50:28,714 DEBUG [RS:0;71d8d2d6408d:46657 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/oldWALs 2024-11-24T03:50:28,714 INFO [RS:0;71d8d2d6408d:46657 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C46657%2C1732420170225.meta:.meta(num 1732420171199) 2024-11-24T03:50:28,715 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,715 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,715 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,715 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,715 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,715 INFO [regionserver/71d8d2d6408d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:50:28,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741878_1054 (size=778) 2024-11-24T03:50:28,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741878_1054 (size=778) 2024-11-24T03:50:28,718 DEBUG [RS:0;71d8d2d6408d:46657 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/oldWALs 2024-11-24T03:50:28,718 INFO [RS:0;71d8d2d6408d:46657 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C46657%2C1732420170225:(num 1732420228339) 2024-11-24T03:50:28,718 DEBUG [RS:0;71d8d2d6408d:46657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:28,719 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T03:50:28,719 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:50:28,719 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.ChoreService(370): Chore service for: regionserver/71d8d2d6408d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T03:50:28,719 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.HBaseServerBase(448): 
Shutdown executor service 2024-11-24T03:50:28,719 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T03:50:28,719 INFO [RS:0;71d8d2d6408d:46657 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46657 2024-11-24T03:50:28,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:50:28,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/71d8d2d6408d,46657,1732420170225 2024-11-24T03:50:28,749 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:50:28,757 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [71d8d2d6408d,46657,1732420170225] 2024-11-24T03:50:28,766 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/71d8d2d6408d,46657,1732420170225 already deleted, retry=false 2024-11-24T03:50:28,766 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 71d8d2d6408d,46657,1732420170225 expired; onlineServers=0 2024-11-24T03:50:28,766 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '71d8d2d6408d,35951,1732420170088' ***** 2024-11-24T03:50:28,766 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T03:50:28,766 INFO [M:0;71d8d2d6408d:35951 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:50:28,766 INFO [M:0;71d8d2d6408d:35951 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:50:28,767 DEBUG [M:0;71d8d2d6408d:35951 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T03:50:28,767 DEBUG [M:0;71d8d2d6408d:35951 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T03:50:28,767 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T03:50:28,767 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420170571 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420170571,5,FailOnTimeoutGroup] 2024-11-24T03:50:28,767 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420170571 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420170571,5,FailOnTimeoutGroup] 2024-11-24T03:50:28,767 INFO [M:0;71d8d2d6408d:35951 {}] hbase.ChoreService(370): Chore service for: master/71d8d2d6408d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T03:50:28,767 INFO [M:0;71d8d2d6408d:35951 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:50:28,767 DEBUG [M:0;71d8d2d6408d:35951 {}] master.HMaster(1795): Stopping service threads 2024-11-24T03:50:28,767 INFO [M:0;71d8d2d6408d:35951 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T03:50:28,768 INFO [M:0;71d8d2d6408d:35951 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:50:28,768 INFO [M:0;71d8d2d6408d:35951 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T03:50:28,768 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T03:50:28,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T03:50:28,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:28,779 DEBUG [M:0;71d8d2d6408d:35951 {}] zookeeper.ZKUtil(347): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T03:50:28,779 WARN [M:0;71d8d2d6408d:35951 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T03:50:28,780 INFO [M:0;71d8d2d6408d:35951 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/.lastflushedseqids 2024-11-24T03:50:28,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741879_1055 (size=228) 2024-11-24T03:50:28,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741879_1055 (size=228) 2024-11-24T03:50:28,787 INFO [M:0;71d8d2d6408d:35951 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T03:50:28,787 INFO [M:0;71d8d2d6408d:35951 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T03:50:28,788 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:50:28,788 INFO [M:0;71d8d2d6408d:35951 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:50:28,788 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:50:28,788 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:50:28,788 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:50:28,788 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-24T03:50:28,810 DEBUG [M:0;71d8d2d6408d:35951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d844ec1828604bbe9e20569dc4acde72 is 82, key is hbase:meta,,1/info:regioninfo/1732420171226/Put/seqid=0 2024-11-24T03:50:28,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741880_1056 (size=5672) 2024-11-24T03:50:28,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741880_1056 (size=5672) 2024-11-24T03:50:28,815 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d844ec1828604bbe9e20569dc4acde72 2024-11-24T03:50:28,833 DEBUG [M:0;71d8d2d6408d:35951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/14623802f7eb491a86f7a74430b95491 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732420171728/Put/seqid=0 2024-11-24T03:50:28,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741881_1057 (size=7090) 2024-11-24T03:50:28,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741881_1057 (size=7090) 2024-11-24T03:50:28,839 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/14623802f7eb491a86f7a74430b95491 2024-11-24T03:50:28,842 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 14623802f7eb491a86f7a74430b95491 2024-11-24T03:50:28,855 DEBUG [M:0;71d8d2d6408d:35951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3e7d158503ff43f780de432e9a1f4b06 is 69, key is 71d8d2d6408d,46657,1732420170225/rs:state/1732420170691/Put/seqid=0 
2024-11-24T03:50:28,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:50:28,857 INFO [RS:0;71d8d2d6408d:46657 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:50:28,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46657-0x1016c3f841f0001, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:50:28,857 INFO [RS:0;71d8d2d6408d:46657 {}] regionserver.HRegionServer(1031): Exiting; stopping=71d8d2d6408d,46657,1732420170225; zookeeper connection closed. 2024-11-24T03:50:28,858 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1e3ea3fd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1e3ea3fd 2024-11-24T03:50:28,858 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T03:50:28,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741882_1058 (size=5156) 2024-11-24T03:50:28,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741882_1058 (size=5156) 2024-11-24T03:50:28,860 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3e7d158503ff43f780de432e9a1f4b06 2024-11-24T03:50:28,877 DEBUG [M:0;71d8d2d6408d:35951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b052f7fa98764261a60ed7681221cede is 52, key is load_balancer_on/state:d/1732420171355/Put/seqid=0 2024-11-24T03:50:28,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741883_1059 (size=5056) 2024-11-24T03:50:28,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741883_1059 (size=5056) 2024-11-24T03:50:28,881 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b052f7fa98764261a60ed7681221cede 2024-11-24T03:50:28,886 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d844ec1828604bbe9e20569dc4acde72 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d844ec1828604bbe9e20569dc4acde72 2024-11-24T03:50:28,891 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d844ec1828604bbe9e20569dc4acde72, entries=8, sequenceid=125, filesize=5.5 K 2024-11-24T03:50:28,892 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/14623802f7eb491a86f7a74430b95491 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/14623802f7eb491a86f7a74430b95491 2024-11-24T03:50:28,897 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 14623802f7eb491a86f7a74430b95491 2024-11-24T03:50:28,897 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/14623802f7eb491a86f7a74430b95491, entries=13, sequenceid=125, filesize=6.9 K 2024-11-24T03:50:28,898 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3e7d158503ff43f780de432e9a1f4b06 as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3e7d158503ff43f780de432e9a1f4b06 2024-11-24T03:50:28,903 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3e7d158503ff43f780de432e9a1f4b06, entries=1, sequenceid=125, filesize=5.0 K 2024-11-24T03:50:28,904 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b052f7fa98764261a60ed7681221cede as hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b052f7fa98764261a60ed7681221cede 2024-11-24T03:50:28,908 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39181/user/jenkins/test-data/bb296256-e39c-c310-05e6-95513daf7b8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b052f7fa98764261a60ed7681221cede, entries=1, sequenceid=125, filesize=4.9 K 2024-11-24T03:50:28,909 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=125, compaction requested=false 2024-11-24T03:50:28,910 INFO [M:0;71d8d2d6408d:35951 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T03:50:28,910 DEBUG [M:0;71d8d2d6408d:35951 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420228787Disabling compacts and flushes for region at 1732420228787Disabling writes for close at 1732420228788 (+1 ms)Obtaining lock to block concurrent updates at 1732420228788Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732420228788Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1732420228788Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732420228789 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732420228789Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732420228810 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732420228810Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732420228819 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732420228832 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732420228832Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732420228842 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732420228855 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732420228855Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732420228863 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732420228876 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732420228876Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cef44dd: reopening flushed file at 1732420228886 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b8d00da: reopening flushed file at 1732420228891 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e5b1d71: reopening flushed file at 1732420228897 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3073ad8d: reopening flushed file at 1732420228903 (+6 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=125, compaction requested=false at 1732420228909 (+6 ms)Writing region close event to WAL at 1732420228910 (+1 ms)Closed at 1732420228910 2024-11-24T03:50:28,913 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,913 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,913 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,914 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,914 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:28,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741830_1006 (size=61320) 2024-11-24T03:50:28,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33007 is added to blk_1073741830_1006 (size=61320) 2024-11-24T03:50:28,916 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T03:50:28,916 INFO [M:0;71d8d2d6408d:35951 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T03:50:28,916 INFO [M:0;71d8d2d6408d:35951 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35951 2024-11-24T03:50:28,916 INFO [M:0;71d8d2d6408d:35951 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:50:29,046 INFO [M:0;71d8d2d6408d:35951 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:50:29,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:50:29,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35951-0x1016c3f841f0000, quorum=127.0.0.1:56614, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:50:29,048 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ddec4b9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:50:29,048 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@637ef10b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:50:29,048 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:50:29,049 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7dc3fcc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:50:29,049 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@604cd81b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.log.dir/,STOPPED} 2024-11-24T03:50:29,054 WARN [BP-2086317584-172.17.0.2-1732420168448 heartbeating to localhost/127.0.0.1:39181 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:50:29,054 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:50:29,054 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:50:29,054 WARN [BP-2086317584-172.17.0.2-1732420168448 heartbeating to localhost/127.0.0.1:39181 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2086317584-172.17.0.2-1732420168448 (Datanode Uuid eb6d2498-093a-4d4f-ab8e-cba0767cb1b0) service to localhost/127.0.0.1:39181 2024-11-24T03:50:29,055 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/data/data3/current/BP-2086317584-172.17.0.2-1732420168448 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:50:29,055 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/data/data4/current/BP-2086317584-172.17.0.2-1732420168448 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:50:29,056 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:50:29,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b8ac8f8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:50:29,058 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@457f5010{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:50:29,058 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:50:29,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fd0bcc5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:50:29,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1cb72b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.log.dir/,STOPPED} 2024-11-24T03:50:29,059 WARN [BP-2086317584-172.17.0.2-1732420168448 heartbeating to localhost/127.0.0.1:39181 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:50:29,059 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:50:29,059 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:50:29,059 WARN [BP-2086317584-172.17.0.2-1732420168448 heartbeating to localhost/127.0.0.1:39181 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2086317584-172.17.0.2-1732420168448 (Datanode Uuid 1fac988b-a195-4679-93e0-0d1e690751ad) service to localhost/127.0.0.1:39181 2024-11-24T03:50:29,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/data/data1/current/BP-2086317584-172.17.0.2-1732420168448 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:50:29,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/cluster_9205b51b-4f95-8b19-2a98-bca52bf86be3/data/data2/current/BP-2086317584-172.17.0.2-1732420168448 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:50:29,060 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:50:29,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32fa876c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:50:29,066 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33d970b6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:50:29,066 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:50:29,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c4ff7f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:50:29,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79a27881{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.log.dir/,STOPPED} 2024-11-24T03:50:29,076 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T03:50:29,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:29,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T03:50:29,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:29,112 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 208) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39181 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39181 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:39181 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:39181 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39181 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:39181 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39181 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:39181 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=239 (was 222) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6355 (was 6612) 2024-11-24T03:50:29,121 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=239, ProcessCount=11, AvailableMemoryMB=6354 2024-11-24T03:50:29,121 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T03:50:29,121 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.log.dir so I do NOT create it in target/test-data/c366f090-a314-cf66-c22a-3124423846bd 2024-11-24T03:50:29,121 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9678d844-1bb0-ca10-48eb-26c5bbb3e76d/hadoop.tmp.dir so I do NOT create it in target/test-data/c366f090-a314-cf66-c22a-3124423846bd 2024-11-24T03:50:29,122 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810, deleteOnExit=true 2024-11-24T03:50:29,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T03:50:29,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/test.cache.data in system properties and HBase conf 2024-11-24T03:50:29,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T03:50:29,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/hadoop.log.dir in system properties and HBase conf 2024-11-24T03:50:29,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T03:50:29,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T03:50:29,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T03:50:29,122 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T03:50:29,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/nfs.dump.dir in system properties and HBase conf 2024-11-24T03:50:29,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/java.io.tmpdir in system properties and HBase conf 2024-11-24T03:50:29,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T03:50:29,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T03:50:29,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T03:50:29,135 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:50:29,401 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:50:29,405 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:50:29,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:50:29,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:50:29,407 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T03:50:29,408 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:50:29,408 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@416a4238{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:50:29,409 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f9ff378{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:50:29,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f697625{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/java.io.tmpdir/jetty-localhost-34625-hadoop-hdfs-3_4_1-tests_jar-_-any-8541566652448045167/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:50:29,511 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2054df1e{HTTP/1.1, (http/1.1)}{localhost:34625} 2024-11-24T03:50:29,511 INFO [Time-limited test {}] server.Server(415): Started @309246ms 2024-11-24T03:50:29,521 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T03:50:29,694 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:50:29,697 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:50:29,698 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:50:29,698 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:50:29,698 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:50:29,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24e08866{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:50:29,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@465e8484{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:50:29,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3353f5c1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/java.io.tmpdir/jetty-localhost-34581-hadoop-hdfs-3_4_1-tests_jar-_-any-16118168047916593654/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:50:29,791 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75fc85f8{HTTP/1.1, (http/1.1)}{localhost:34581} 2024-11-24T03:50:29,791 INFO [Time-limited test {}] server.Server(415): Started @309526ms 2024-11-24T03:50:29,792 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:50:29,831 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T03:50:29,833 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T03:50:29,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T03:50:29,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T03:50:29,834 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T03:50:29,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d1d035a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/hadoop.log.dir/,AVAILABLE} 2024-11-24T03:50:29,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c79f83b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T03:50:29,958 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a40f8b4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/java.io.tmpdir/jetty-localhost-34017-hadoop-hdfs-3_4_1-tests_jar-_-any-6555630662113254786/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:50:29,959 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@185b315d{HTTP/1.1, (http/1.1)}{localhost:34017} 2024-11-24T03:50:29,959 INFO [Time-limited test {}] server.Server(415): Started @309694ms 2024-11-24T03:50:29,960 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T03:50:30,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:30,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:30,519 WARN [Thread-2478 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/data/data1/current/BP-1214609500-172.17.0.2-1732420229139/current, will proceed with Du for space computation calculation, 2024-11-24T03:50:30,519 WARN [Thread-2479 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/data/data2/current/BP-1214609500-172.17.0.2-1732420229139/current, will proceed with Du for space computation calculation, 2024-11-24T03:50:30,551 WARN [Thread-2443 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T03:50:30,553 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad1026e0376dba01 with lease ID 0xf09b3265bae9a68c: Processing first storage report for DS-a90f063f-1f4c-411a-83f6-9f97bde73b87 from datanode DatanodeRegistration(127.0.0.1:45059, datanodeUuid=22fbc400-1564-4a5b-8e21-d80388bc1aff, infoPort=36915, infoSecurePort=0, ipcPort=42601, storageInfo=lv=-57;cid=testClusterID;nsid=1584354902;c=1732420229139) 2024-11-24T03:50:30,553 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad1026e0376dba01 with lease ID 0xf09b3265bae9a68c: from storage DS-a90f063f-1f4c-411a-83f6-9f97bde73b87 node DatanodeRegistration(127.0.0.1:45059, datanodeUuid=22fbc400-1564-4a5b-8e21-d80388bc1aff, infoPort=36915, infoSecurePort=0, ipcPort=42601, storageInfo=lv=-57;cid=testClusterID;nsid=1584354902;c=1732420229139), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:50:30,553 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad1026e0376dba01 with lease ID 0xf09b3265bae9a68c: Processing first storage report for DS-6a9ca336-3301-47dd-9776-93f7a68cbf2a from datanode DatanodeRegistration(127.0.0.1:45059, datanodeUuid=22fbc400-1564-4a5b-8e21-d80388bc1aff, infoPort=36915, infoSecurePort=0, ipcPort=42601, storageInfo=lv=-57;cid=testClusterID;nsid=1584354902;c=1732420229139) 2024-11-24T03:50:30,553 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad1026e0376dba01 with lease ID 0xf09b3265bae9a68c: from storage DS-6a9ca336-3301-47dd-9776-93f7a68cbf2a node DatanodeRegistration(127.0.0.1:45059, datanodeUuid=22fbc400-1564-4a5b-8e21-d80388bc1aff, infoPort=36915, infoSecurePort=0, ipcPort=42601, storageInfo=lv=-57;cid=testClusterID;nsid=1584354902;c=1732420229139), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:50:30,703 WARN [Thread-2490 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/data/data3/current/BP-1214609500-172.17.0.2-1732420229139/current, will proceed with Du for space computation calculation, 2024-11-24T03:50:30,703 WARN [Thread-2491 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/data/data4/current/BP-1214609500-172.17.0.2-1732420229139/current, will proceed with Du for space computation calculation, 2024-11-24T03:50:30,723 WARN [Thread-2466 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T03:50:30,725 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x118865279a218274 with lease ID 0xf09b3265bae9a68d: Processing first storage report for DS-a356f524-e9e3-4ad7-836b-7c840147aa86 from datanode DatanodeRegistration(127.0.0.1:33767, datanodeUuid=201454fc-c10e-4d7d-812c-23183a8fff44, infoPort=37621, infoSecurePort=0, ipcPort=45555, storageInfo=lv=-57;cid=testClusterID;nsid=1584354902;c=1732420229139) 2024-11-24T03:50:30,725 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x118865279a218274 with lease ID 0xf09b3265bae9a68d: from storage DS-a356f524-e9e3-4ad7-836b-7c840147aa86 node DatanodeRegistration(127.0.0.1:33767, datanodeUuid=201454fc-c10e-4d7d-812c-23183a8fff44, infoPort=37621, infoSecurePort=0, ipcPort=45555, storageInfo=lv=-57;cid=testClusterID;nsid=1584354902;c=1732420229139), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:50:30,725 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x118865279a218274 with lease ID 0xf09b3265bae9a68d: Processing first storage report for DS-f8c54b01-28f3-4a02-8811-b30edc0ec2c3 from datanode DatanodeRegistration(127.0.0.1:33767, datanodeUuid=201454fc-c10e-4d7d-812c-23183a8fff44, infoPort=37621, infoSecurePort=0, ipcPort=45555, storageInfo=lv=-57;cid=testClusterID;nsid=1584354902;c=1732420229139) 2024-11-24T03:50:30,725 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x118865279a218274 with lease ID 0xf09b3265bae9a68d: from storage DS-f8c54b01-28f3-4a02-8811-b30edc0ec2c3 node DatanodeRegistration(127.0.0.1:33767, datanodeUuid=201454fc-c10e-4d7d-812c-23183a8fff44, infoPort=37621, infoSecurePort=0, ipcPort=45555, storageInfo=lv=-57;cid=testClusterID;nsid=1584354902;c=1732420229139), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T03:50:30,788 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd 2024-11-24T03:50:30,791 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/zookeeper_0, clientPort=56015, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T03:50:30,792 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56015 2024-11-24T03:50:30,792 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:50:30,794 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:50:30,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:50:30,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741825_1001 (size=7) 2024-11-24T03:50:30,803 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82 with version=8 2024-11-24T03:50:30,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38669/user/jenkins/test-data/d4a0326a-5f7d-12f2-70d3-cb6b65ce6070/hbase-staging 2024-11-24T03:50:30,805 INFO [Time-limited test {}] client.ConnectionUtils(128): master/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:50:30,805 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:50:30,805 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:50:30,805 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:50:30,805 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:50:30,805 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:50:30,805 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T03:50:30,805 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:50:30,806 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45295 2024-11-24T03:50:30,807 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45295 connecting to ZooKeeper ensemble=127.0.0.1:56015 2024-11-24T03:50:30,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452950x0, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-24T03:50:30,844 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45295-0x1016c40714b0000 connected 2024-11-24T03:50:30,907 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:50:30,908 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:50:30,910 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:50:30,910 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82, hbase.cluster.distributed=false 2024-11-24T03:50:30,911 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:50:30,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45295 2024-11-24T03:50:30,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45295 2024-11-24T03:50:30,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45295 2024-11-24T03:50:30,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45295 2024-11-24T03:50:30,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45295 2024-11-24T03:50:30,926 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/71d8d2d6408d:0 server-side Connection retries=45 2024-11-24T03:50:30,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:50:30,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T03:50:30,926 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T03:50:30,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T03:50:30,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T03:50:30,926 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T03:50:30,926 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T03:50:30,927 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41813 2024-11-24T03:50:30,928 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41813 connecting to ZooKeeper ensemble=127.0.0.1:56015 2024-11-24T03:50:30,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:50:30,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:50:30,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:418130x0, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T03:50:30,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:50:30,941 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41813-0x1016c40714b0001 connected 2024-11-24T03:50:30,941 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T03:50:30,941 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T03:50:30,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T03:50:30,943 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T03:50:30,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41813 2024-11-24T03:50:30,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41813 2024-11-24T03:50:30,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41813 2024-11-24T03:50:30,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41813 2024-11-24T03:50:30,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41813 2024-11-24T03:50:30,956 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;71d8d2d6408d:45295 2024-11-24T03:50:30,956 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/71d8d2d6408d,45295,1732420230805 2024-11-24T03:50:30,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:50:30,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:50:30,965 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/71d8d2d6408d,45295,1732420230805 2024-11-24T03:50:30,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T03:50:30,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:30,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:30,974 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T03:50:30,974 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/71d8d2d6408d,45295,1732420230805 from backup master directory 2024-11-24T03:50:30,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:50:30,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/71d8d2d6408d,45295,1732420230805 2024-11-24T03:50:30,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T03:50:30,982 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
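The ZKWatcher/ZKUtil records in this startup sequence ("Set watcher on znode that does not yet exist, /hbase/running", "/hbase/master", "/hbase/acl") describe ZooKeeper's watch-before-create pattern: an exists() check registers a watcher even when the znode has not been created yet, and the client later receives NodeCreated/NodeDeleted/NodeChildrenChanged events like the ones logged above. The sketch below shows that behaviour with the plain ZooKeeper client API, not HBase's internal ZKUtil/ZKWatcher classes; the 127.0.0.1:56015 ensemble address is just the mini-cluster client port reported earlier in this log.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Minimal watch-before-create sketch (assumes a ZooKeeper server on the
// mini-cluster port from the log; any reachable ensemble works the same).
public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56015", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // NodeCreated / NodeDeleted / NodeChildrenChanged arrive here,
        // matching the "Received ZooKeeper Event" DEBUG lines above.
        System.out.println(event.getType() + " on " + event.getPath());
      }
    });
    // Returns null if /hbase/master does not exist yet, but still leaves a
    // one-shot watch behind -- the "Set watcher on znode that does not yet
    // exist" case in the log.
    System.out.println("exists: " + zk.exists("/hbase/master", true));
    Thread.sleep(10_000); // keep the session alive long enough to see events
    zk.close();
  }
}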
2024-11-24T03:50:30,982 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=71d8d2d6408d,45295,1732420230805 2024-11-24T03:50:30,985 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/hbase.id] with ID: 6bf62064-ac99-4efc-a01e-7262ff31d9da 2024-11-24T03:50:30,985 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/.tmp/hbase.id 2024-11-24T03:50:30,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:50:30,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741826_1002 (size=42) 2024-11-24T03:50:30,991 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/.tmp/hbase.id]:[hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/hbase.id] 2024-11-24T03:50:31,001 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:50:31,002 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T03:50:31,003 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
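The two FSUtils records above first write the cluster ID under .tmp/hbase.id and then move the file to its final hbase.id location, so readers never observe a half-written ID file. The following is a rough sketch of that write-then-rename pattern using the Hadoop FileSystem API; it is not HBase's own FSUtils implementation, and the hdfs://localhost:41511 root is simply the test path from this log (the local filesystem behaves the same way for this example).

import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative write-to-temp-then-rename, assuming a reachable filesystem at
// the URI below; paths and file contents are example values only.
public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path("hdfs://localhost:41511/user/jenkins/test-data/demo");
    FileSystem fs = root.getFileSystem(conf);

    Path tmp = new Path(root, ".tmp/hbase.id");
    Path dst = new Path(root, "hbase.id");

    // Step 1: write the ID to a temporary location.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // Step 2: rename into place (atomic on HDFS), as the log's
    // "Move the temporary cluster ID file to its target location" step does.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
    System.out.println("cluster id file at " + dst);
  }
}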
2024-11-24T03:50:31,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:50:31,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741827_1003 (size=196) 2024-11-24T03:50:31,018 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T03:50:31,019 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T03:50:31,019 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:50:31,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:50:31,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741828_1004 (size=1189) 2024-11-24T03:50:31,025 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store 2024-11-24T03:50:31,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:50:31,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741829_1005 (size=34) 2024-11-24T03:50:31,031 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:50:31,031 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:50:31,031 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:50:31,031 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:50:31,031 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:50:31,031 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:50:31,031 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
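The 'master:store' descriptor printed above (families info, proc, rs and state with the listed versions, encodings, bloom filters and block sizes) can be approximated with HBase's public client API. The sketch below is only an illustration of those column-family settings, not the MasterRegion bootstrap code itself; family names and attribute values are taken from the log output.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Builds a descriptor mirroring the attributes logged above:
// 'info' keeps 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory,
// 8 KB blocks; 'proc', 'rs' and 'state' keep 1 version with a ROW bloom.
public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"));
    builder.setColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build());
    for (String family : new String[] { "proc", "rs", "state" }) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .build());
    }
    return builder.build();
  }

  public static void main(String[] args) {
    System.out.println(build());
  }
}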
2024-11-24T03:50:31,031 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420231031Disabling compacts and flushes for region at 1732420231031Disabling writes for close at 1732420231031Writing region close event to WAL at 1732420231031Closed at 1732420231031 2024-11-24T03:50:31,032 WARN [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/.initializing 2024-11-24T03:50:31,032 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/WALs/71d8d2d6408d,45295,1732420230805 2024-11-24T03:50:31,034 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C45295%2C1732420230805, suffix=, logDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/WALs/71d8d2d6408d,45295,1732420230805, archiveDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/oldWALs, maxLogs=10 2024-11-24T03:50:31,034 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C45295%2C1732420230805.1732420231034 2024-11-24T03:50:31,038 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/WALs/71d8d2d6408d,45295,1732420230805/71d8d2d6408d%2C45295%2C1732420230805.1732420231034 2024-11-24T03:50:31,044 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37621:37621),(127.0.0.1/127.0.0.1:36915:36915)] 2024-11-24T03:50:31,045 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:50:31,045 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:50:31,045 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,045 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,047 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T03:50:31,048 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:31,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:50:31,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,049 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T03:50:31,049 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:31,050 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:50:31,050 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T03:50:31,051 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:31,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:50:31,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,052 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T03:50:31,052 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:31,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T03:50:31,053 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,054 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,054 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,055 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,055 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,055 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T03:50:31,056 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T03:50:31,059 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:50:31,059 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745724, jitterRate=-0.051763445138931274}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T03:50:31,060 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732420231045Initializing all the Stores at 1732420231046 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420231046Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420231046Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420231046Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420231047 (+1 ms)Cleaning up temporary data from old regions at 1732420231055 (+8 ms)Region opened successfully at 1732420231060 (+5 ms) 2024-11-24T03:50:31,060 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T03:50:31,063 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72a735ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:50:31,064 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T03:50:31,064 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T03:50:31,064 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T03:50:31,065 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T03:50:31,065 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T03:50:31,065 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T03:50:31,065 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T03:50:31,068 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T03:50:31,069 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T03:50:31,081 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T03:50:31,082 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T03:50:31,083 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T03:50:31,090 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T03:50:31,090 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T03:50:31,092 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T03:50:31,098 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T03:50:31,099 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T03:50:31,100 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:31,107 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T03:50:31,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:31,110 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T03:50:31,120 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T03:50:31,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:50:31,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T03:50:31,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,132 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=71d8d2d6408d,45295,1732420230805, sessionid=0x1016c40714b0000, setting cluster-up flag (Was=false) 2024-11-24T03:50:31,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,173 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T03:50:31,174 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,45295,1732420230805 2024-11-24T03:50:31,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,215 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T03:50:31,216 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=71d8d2d6408d,45295,1732420230805 2024-11-24T03:50:31,217 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T03:50:31,218 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T03:50:31,219 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T03:50:31,219 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
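Annotation: the StochasticLoadBalancer line above lists its cost functions and reports "sum of multiplier of cost functions = 0.0" at startup, i.e. no function currently carries weight. A simplified illustration (not HBase's actual balancer code) of how scaled per-metric costs and their multipliers combine into one weighted cost:

```java
/** Simplified illustration only: each cost function yields a value scaled to
 *  [0, 1] and contributes according to its multiplier; a multiplier sum of 0.0,
 *  as logged above, means there is nothing to optimise yet. */
final class WeightedCost {
  static double combine(double[] scaledCosts, double[] multipliers) {
    double weighted = 0.0, total = 0.0;
    for (int i = 0; i < scaledCosts.length; i++) {
      weighted += multipliers[i] * scaledCosts[i];
      total += multipliers[i];
    }
    // With every multiplier at zero, report zero cost rather than divide by zero.
    return total == 0.0 ? 0.0 : weighted / total;
  }

  public static void main(String[] args) {
    double[] costs = { 0.2, 0.6, 0.1 };      // e.g. region count skew, move cost, locality
    double[] weights = { 500.0, 7.0, 25.0 }; // hypothetical multipliers
    System.out.println(combine(costs, weights));
  }
}
```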
2024-11-24T03:50:31,219 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 71d8d2d6408d,45295,1732420230805 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T03:50:31,220 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:50:31,220 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:50:31,220 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:50:31,220 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=5, maxPoolSize=5 2024-11-24T03:50:31,220 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/71d8d2d6408d:0, corePoolSize=10, maxPoolSize=10 2024-11-24T03:50:31,220 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,220 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:50:31,220 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,221 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732420261221 2024-11-24T03:50:31,221 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T03:50:31,221 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T03:50:31,221 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T03:50:31,222 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T03:50:31,222 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T03:50:31,222 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T03:50:31,222 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,222 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:50:31,222 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T03:50:31,222 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T03:50:31,222 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T03:50:31,222 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T03:50:31,223 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:31,223 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T03:50:31,226 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T03:50:31,227 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T03:50:31,227 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420231227,5,FailOnTimeoutGroup] 2024-11-24T03:50:31,230 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420231228,5,FailOnTimeoutGroup] 2024-11-24T03:50:31,230 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,231 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T03:50:31,231 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,231 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:50:31,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741831_1007 (size=1321) 2024-11-24T03:50:31,246 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(746): ClusterId : 6bf62064-ac99-4efc-a01e-7262ff31d9da 2024-11-24T03:50:31,246 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T03:50:31,257 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T03:50:31,257 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T03:50:31,266 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T03:50:31,266 DEBUG [RS:0;71d8d2d6408d:41813 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77026b9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=71d8d2d6408d/172.17.0.2:0 2024-11-24T03:50:31,276 DEBUG [RS:0;71d8d2d6408d:41813 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;71d8d2d6408d:41813 2024-11-24T03:50:31,276 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T03:50:31,276 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T03:50:31,276 DEBUG [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(832): About to register with Master. 
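Annotation: the ClusterId RS:0 logs before registering (6bf62064-ac99-4efc-a01e-7262ff31d9da) is the same identifier a client can read back once the master is active. A minimal sketch using the standard Admin API, assuming the hbase-client jar and this test's ZooKeeper quorum at 127.0.0.1:56015 (a real deployment would use its own quorum):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterIdProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum and client port taken from the log above; adjust as needed.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "56015");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      // Prints the ClusterId the region server reports on startup, plus the
      // active master and the region servers that have registered with it.
      System.out.println("clusterId = " + metrics.getClusterId());
      System.out.println("master    = " + metrics.getMasterName());
      System.out.println("servers   = " + metrics.getLiveServerMetrics().keySet());
    }
  }
}
```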
2024-11-24T03:50:31,277 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(2659): reportForDuty to master=71d8d2d6408d,45295,1732420230805 with port=41813, startcode=1732420230926 2024-11-24T03:50:31,277 DEBUG [RS:0;71d8d2d6408d:41813 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T03:50:31,279 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39991, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T03:50:31,280 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45295 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:31,280 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45295 {}] master.ServerManager(517): Registering regionserver=71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:31,281 DEBUG [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82 2024-11-24T03:50:31,281 DEBUG [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41511 2024-11-24T03:50:31,281 DEBUG [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T03:50:31,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:50:31,290 DEBUG [RS:0;71d8d2d6408d:41813 {}] zookeeper.ZKUtil(111): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:31,290 WARN [RS:0;71d8d2d6408d:41813 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T03:50:31,290 INFO [RS:0;71d8d2d6408d:41813 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:50:31,290 DEBUG [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:31,291 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [71d8d2d6408d,41813,1732420230926] 2024-11-24T03:50:31,293 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T03:50:31,295 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T03:50:31,295 INFO [RS:0;71d8d2d6408d:41813 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T03:50:31,295 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
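Annotation: MemStoreFlusher reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit. Assuming the default sizing properties (hbase.regionserver.global.memstore.size = 0.4 of heap, hbase.regionserver.global.memstore.size.lower.limit = 0.95) and a hypothetical test heap of roughly 2.2 GB, the arithmetic reproduces the logged figures:

```java
/** Back-of-the-envelope check of the MemStoreFlusher numbers logged above.
 *  The heap size is a hypothetical value chosen to match the log; the two
 *  fractions are the documented defaults of the named properties. */
public class MemstoreLimits {
  public static void main(String[] args) {
    double heapMb = 2200.0;            // hypothetical heap for this test JVM
    double globalFraction = 0.4;       // hbase.regionserver.global.memstore.size
    double lowerLimitFraction = 0.95;  // hbase.regionserver.global.memstore.size.lower.limit
    double limitMb = heapMb * globalFraction;          // 880 M
    double lowMarkMb = limitMb * lowerLimitFraction;   // 836 M
    System.out.printf("globalMemStoreLimit=%.0f M, lowMark=%.0f M%n", limitMb, lowMarkMb);
  }
}
```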
2024-11-24T03:50:31,295 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T03:50:31,296 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T03:50:31,296 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,296 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,296 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,296 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,296 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,296 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,297 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/71d8d2d6408d:0, corePoolSize=2, maxPoolSize=2 2024-11-24T03:50:31,297 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,297 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,297 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,297 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,297 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,297 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/71d8d2d6408d:0, corePoolSize=1, maxPoolSize=1 2024-11-24T03:50:31,297 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:50:31,297 DEBUG [RS:0;71d8d2d6408d:41813 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/71d8d2d6408d:0, corePoolSize=3, maxPoolSize=3 2024-11-24T03:50:31,297 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
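Annotation: each "Starting executor service" line above names a per-event-type pool with fixed corePoolSize/maxPoolSize values. Illustration only, using the JDK ThreadPoolExecutor rather than HBase's internal executor.ExecutorService wrapper, with pool sizes taken from the log:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/** Sketch of what corePoolSize/maxPoolSize mean for the per-event-type pools
 *  started above; not the HBase implementation. */
public class EventPoolSketch {
  static ThreadPoolExecutor newPool(String name, int core, int max) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        core, max, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-worker"));
    // Idle workers may exit, as with the allowCoreThreadTimeOut=true logged earlier.
    pool.allowCoreThreadTimeOut(true);
    return pool;
  }

  public static void main(String[] args) {
    // Sizes taken from the RS_* executor entries in the log.
    ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
    ThreadPoolExecutor logReplay  = newPool("RS_LOG_REPLAY_OPS", 2, 2);
    ThreadPoolExecutor snapshots  = newPool("RS_SNAPSHOT_OPERATIONS", 3, 3);
    openRegion.execute(() -> System.out.println("open-region task would run here"));
    openRegion.shutdown();
    logReplay.shutdown();
    snapshots.shutdown();
  }
}
```

Because core and max sizes are equal for every pool in the log, each event type is handled by a fixed-size pool that simply queues excess work.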
2024-11-24T03:50:31,297 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,297 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,297 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,297 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,297 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41813,1732420230926-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:50:31,310 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T03:50:31,310 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,41813,1732420230926-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,310 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,310 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.Replication(171): 71d8d2d6408d,41813,1732420230926 started 2024-11-24T03:50:31,324 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:31,324 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(1482): Serving as 71d8d2d6408d,41813,1732420230926, RpcServer on 71d8d2d6408d/172.17.0.2:41813, sessionid=0x1016c40714b0001 2024-11-24T03:50:31,324 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T03:50:31,324 DEBUG [RS:0;71d8d2d6408d:41813 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:31,324 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,41813,1732420230926' 2024-11-24T03:50:31,324 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T03:50:31,324 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T03:50:31,325 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T03:50:31,325 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T03:50:31,325 DEBUG [RS:0;71d8d2d6408d:41813 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:31,325 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '71d8d2d6408d,41813,1732420230926' 2024-11-24T03:50:31,325 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T03:50:31,325 DEBUG 
[RS:0;71d8d2d6408d:41813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T03:50:31,325 DEBUG [RS:0;71d8d2d6408d:41813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T03:50:31,325 INFO [RS:0;71d8d2d6408d:41813 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T03:50:31,326 INFO [RS:0;71d8d2d6408d:41813 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T03:50:31,427 INFO [RS:0;71d8d2d6408d:41813 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C41813%2C1732420230926, suffix=, logDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/71d8d2d6408d,41813,1732420230926, archiveDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/oldWALs, maxLogs=32 2024-11-24T03:50:31,428 INFO [RS:0;71d8d2d6408d:41813 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C41813%2C1732420230926.1732420231427 2024-11-24T03:50:31,433 INFO [RS:0;71d8d2d6408d:41813 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/71d8d2d6408d,41813,1732420230926/71d8d2d6408d%2C41813%2C1732420230926.1732420231427 2024-11-24T03:50:31,433 DEBUG [RS:0;71d8d2d6408d:41813 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36915:36915),(127.0.0.1/127.0.0.1:37621:37621)] 2024-11-24T03:50:31,638 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T03:50:31,638 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82 2024-11-24T03:50:31,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741833_1009 (size=32) 2024-11-24T03:50:31,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741833_1009 (size=32) 2024-11-24T03:50:31,644 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:50:31,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:50:31,646 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:50:31,647 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:31,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:50:31,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:50:31,648 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:50:31,648 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:31,649 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:50:31,649 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:50:31,650 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:50:31,650 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:31,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:50:31,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:50:31,651 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:50:31,651 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:31,652 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:50:31,652 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:50:31,652 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740 2024-11-24T03:50:31,653 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740 2024-11-24T03:50:31,654 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:50:31,654 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:50:31,654 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T03:50:31,655 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:50:31,657 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T03:50:31,657 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716948, jitterRate=-0.08835390210151672}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:50:31,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732420231644Initializing all the Stores at 1732420231645 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420231645Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420231645Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420231645Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420231645Cleaning up temporary data from old regions at 1732420231654 (+9 ms)Region opened successfully at 1732420231657 (+3 ms) 2024-11-24T03:50:31,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:50:31,658 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-24T03:50:31,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:50:31,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:50:31,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:50:31,658 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:50:31,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420231658Disabling compacts and flushes for region at 1732420231658Disabling writes for close at 1732420231658Writing region close event to WAL at 1732420231658Closed at 1732420231658 2024-11-24T03:50:31,659 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:50:31,659 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T03:50:31,659 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T03:50:31,660 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:50:31,661 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T03:50:31,811 DEBUG [71d8d2d6408d:45295 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T03:50:31,812 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:31,813 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,41813,1732420230926, state=OPENING 2024-11-24T03:50:31,857 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T03:50:31,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:31,865 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:50:31,865 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, 
state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T03:50:31,866 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,41813,1732420230926}] 2024-11-24T03:50:31,866 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:50:32,019 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T03:50:32,021 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49249, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T03:50:32,027 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T03:50:32,027 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:50:32,029 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=71d8d2d6408d%2C41813%2C1732420230926.meta, suffix=.meta, logDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/71d8d2d6408d,41813,1732420230926, archiveDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/oldWALs, maxLogs=32 2024-11-24T03:50:32,030 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 71d8d2d6408d%2C41813%2C1732420230926.meta.1732420232030.meta 2024-11-24T03:50:32,036 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/71d8d2d6408d,41813,1732420230926/71d8d2d6408d%2C41813%2C1732420230926.meta.1732420232030.meta 2024-11-24T03:50:32,041 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37621:37621),(127.0.0.1/127.0.0.1:36915:36915)] 2024-11-24T03:50:32,045 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T03:50:32,045 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T03:50:32,045 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T03:50:32,045 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta 
successfully. 2024-11-24T03:50:32,045 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T03:50:32,045 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T03:50:32,045 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T03:50:32,045 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T03:50:32,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T03:50:32,047 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T03:50:32,047 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:32,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:50:32,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T03:50:32,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T03:50:32,048 DEBUG [StoreOpener-1588230740-1 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:32,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:50:32,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T03:50:32,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T03:50:32,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:32,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T03:50:32,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T03:50:32,050 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T03:50:32,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T03:50:32,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
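Editor's note: the repeated compactions.CompactionConfiguration(183) lines above print the effective per-store compaction settings (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, major period 604800000, major jitter 0.500000). A minimal sketch of how those values would be expressed as configuration, assuming they map to the standard hbase.hstore.compaction.* / hbase.hregion.majorcompaction keys; the numbers are simply the ones the log reports, not recommendations.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSettingsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror what CompactionConfiguration prints above; the key names are
    // assumed to be the usual HBase compaction properties.
    conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);       // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);  // major period, ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```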
2024-11-24T03:50:32,051 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T03:50:32,052 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740 2024-11-24T03:50:32,052 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740 2024-11-24T03:50:32,053 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T03:50:32,053 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T03:50:32,054 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T03:50:32,055 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T03:50:32,055 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702103, jitterRate=-0.10722991824150085}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T03:50:32,055 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T03:50:32,056 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732420232045Writing region info on filesystem at 1732420232045Initializing all the Stores at 1732420232046 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420232046Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420232046Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732420232046Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732420232046Cleaning up temporary data from old regions at 1732420232053 (+7 ms)Running coprocessor post-open hooks at 1732420232055 (+2 ms)Region opened successfully at 1732420232056 (+1 ms) 2024-11-24T03:50:32,057 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732420232018 2024-11-24T03:50:32,058 DEBUG [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T03:50:32,059 INFO [RS_OPEN_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T03:50:32,059 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:32,060 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 71d8d2d6408d,41813,1732420230926, state=OPEN 2024-11-24T03:50:32,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:50:32,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T03:50:32,095 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:32,096 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:50:32,096 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T03:50:32,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T03:50:32,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=71d8d2d6408d,41813,1732420230926 in 229 msec 2024-11-24T03:50:32,100 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T03:50:32,100 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 439 msec 2024-11-24T03:50:32,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): 
Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,43259,1732420032702/71d8d2d6408d%2C43259%2C1732420032702.meta.1732420033733.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T03:50:32,101 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T03:50:32,101 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T03:50:32,102 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:50:32,102 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,41813,1732420230926, seqNum=-1] 2024-11-24T03:50:32,102 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:50:32,104 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42727, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:50:32,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 890 msec 2024-11-24T03:50:32,108 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732420232108, completionTime=-1 2024-11-24T03:50:32,109 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T03:50:32,109 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T03:50:32,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44445/user/jenkins/test-data/8f062863-ccab-031a-36f5-b9ab0f65855a/WALs/71d8d2d6408d,32933,1732420033969/71d8d2d6408d%2C32933%2C1732420033969.1732420034206 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T03:50:32,110 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T03:50:32,110 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732420292110 2024-11-24T03:50:32,110 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732420352110 2024-11-24T03:50:32,110 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T03:50:32,111 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,45295,1732420230805-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:32,111 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,45295,1732420230805-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:32,111 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,45295,1732420230805-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:32,111 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-71d8d2d6408d:45295, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:32,111 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:32,111 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:32,113 DEBUG [master/71d8d2d6408d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T03:50:32,114 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.132sec 2024-11-24T03:50:32,114 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T03:50:32,114 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T03:50:32,114 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 
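Editor's note: the two RecoverLeaseFSUtils(258) warnings above fail inside a reflective call to DistributedFileSystem#isFileClosed because the DFSClient behind that FileSystem has already been shut down ("java.io.IOException: Filesystem closed"). A minimal sketch of that reflective probe, inferred from the stack trace rather than copied from HBase; the NameNode address and WAL path are placeholders.

```java
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  // Reflectively probe DistributedFileSystem#isFileClosed, as the logged stack
  // trace indicates RecoverLeaseFSUtils does. Returns false when the method is
  // absent or the call fails, e.g. because the filesystem was already closed.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (ReflectiveOperationException | RuntimeException e) {
      return false;
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address in the style of the log; any HDFS URI works.
    conf.set("fs.defaultFS", "hdfs://localhost:8020");
    try (FileSystem fs = FileSystem.get(conf)) {
      // Placeholder path standing in for a WAL file under /user/jenkins/test-data.
      System.out.println(isFileClosed(fs, new Path("/tmp/example.wal")));
    }
  }
}
```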
2024-11-24T03:50:32,114 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T03:50:32,114 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T03:50:32,115 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,45295,1732420230805-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T03:50:32,115 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,45295,1732420230805-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T03:50:32,117 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T03:50:32,117 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T03:50:32,117 INFO [master/71d8d2d6408d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=71d8d2d6408d,45295,1732420230805-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T03:50:32,147 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68424a73, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:50:32,147 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 71d8d2d6408d,45295,-1 for getting cluster id 2024-11-24T03:50:32,147 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T03:50:32,148 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6bf62064-ac99-4efc-a01e-7262ff31d9da' 2024-11-24T03:50:32,148 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T03:50:32,149 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6bf62064-ac99-4efc-a01e-7262ff31d9da" 2024-11-24T03:50:32,149 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57e92c2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:50:32,149 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [71d8d2d6408d,45295,-1] 2024-11-24T03:50:32,149 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T03:50:32,149 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:32,150 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33610, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
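Editor's note: the ClusterIdFetcher / ConnectionRegistry lines above show a client bootstrapping against the master registry and then resolving the hbase:meta location. A minimal client-side sketch using the public Connection/RegionLocator API (not the internal ConnectionUtils path the log exercises); the quorum address is the one from this run and would differ elsewhere.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateMeta {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:56015"); // quorum from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Forces a fresh lookup; prints something similar to the
      // "The fetched meta region location is [region=hbase:meta,,1.1588230740, ...]" line.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println(loc);
    }
  }
}
```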
2024-11-24T03:50:32,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75fd098f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T03:50:32,151 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T03:50:32,152 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=71d8d2d6408d,41813,1732420230926, seqNum=-1] 2024-11-24T03:50:32,153 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T03:50:32,154 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T03:50:32,155 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=71d8d2d6408d,45295,1732420230805 2024-11-24T03:50:32,156 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T03:50:32,158 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T03:50:32,158 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T03:50:32,160 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/test.com,8080,1, archiveDir=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/oldWALs, maxLogs=32 2024-11-24T03:50:32,160 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732420232160 2024-11-24T03:50:32,165 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/test.com,8080,1/test.com%2C8080%2C1.1732420232160 2024-11-24T03:50:32,165 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37621:37621),(127.0.0.1/127.0.0.1:36915:36915)] 2024-11-24T03:50:32,166 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732420232166 2024-11-24T03:50:32,175 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,176 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,176 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,176 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,176 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,176 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/test.com,8080,1/test.com%2C8080%2C1.1732420232160 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/test.com,8080,1/test.com%2C8080%2C1.1732420232166 
2024-11-24T03:50:32,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741835_1011 (size=93) 2024-11-24T03:50:32,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741835_1011 (size=93) 2024-11-24T03:50:32,179 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/WALs/test.com,8080,1/test.com%2C8080%2C1.1732420232160 to hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/oldWALs/test.com%2C8080%2C1.1732420232160 2024-11-24T03:50:32,183 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36915:36915),(127.0.0.1/127.0.0.1:37621:37621)] 2024-11-24T03:50:32,183 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,183 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,183 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,183 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,184 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741836_1012 (size=93) 2024-11-24T03:50:32,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741836_1012 (size=93) 2024-11-24T03:50:32,187 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/oldWALs 2024-11-24T03:50:32,187 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732420232166) 2024-11-24T03:50:32,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T03:50:32,187 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
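Editor's note: the lines above create an FSHLog for prefix test.com%2C8080%2C1, roll it (entries=0, filesize=85 B), and archive the old file into oldWALs. The test drives the WAL directly, but the closest public analogue is Admin#rollWALWriter; a minimal sketch, assuming a reachable cluster and using the region server name from the log as an illustrative value.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Server name copied from the log; in real code it would come from cluster
      // discovery rather than a hard-coded "host,port,startcode" string.
      ServerName rs = ServerName.valueOf("71d8d2d6408d,41813,1732420230926");
      // Asks that region server to close its current WAL and open a new one,
      // the same roll-and-archive sequence the AbstractFSWAL lines show.
      admin.rollWALWriter(rs);
    }
  }
}
```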
2024-11-24T03:50:32,187 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:50:32,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:32,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:32,187 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
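Editor's note: the tear-down call stack above (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster) is the standard JUnit 4 lifecycle for these tests. A minimal sketch of that lifecycle; the HBaseTestingUtil class name comes from the stack trace, but the startMiniCluster()/getConfiguration() calls are assumed from the familiar testing-util API rather than the actual test source.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketchTest {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Starts a single master + single region server, as in the logged run
    // ("Minicluster is up; activeMaster=...").
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" / JVMClusterUtil lines above.
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void clusterIsUp() {
    Assert.assertNotNull(TEST_UTIL.getConfiguration());
  }
}
```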
2024-11-24T03:50:32,187 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T03:50:32,187 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1910684927, stopped=false 2024-11-24T03:50:32,187 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=71d8d2d6408d,45295,1732420230805 2024-11-24T03:50:32,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:50:32,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:32,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T03:50:32,215 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:50:32,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:32,215 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T03:50:32,215 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:50:32,215 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:32,215 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:50:32,215 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '71d8d2d6408d,41813,1732420230926' ***** 2024-11-24T03:50:32,215 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T03:50:32,216 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(959): stopping server 71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;71d8d2d6408d:41813. 
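Editor's note: the ZKWatcher lines above react to a NodeDeleted event on /hbase/running and then re-set a watch on the now-missing znode; that deletion is what turns into "STOPPING region server". A minimal standalone sketch of the same watch pattern with the plain ZooKeeper client, using the quorum address from the log; it illustrates the watch mechanics only, not HBase's ZKWatcher implementation.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatcher implements Watcher {
  @Override
  public void process(WatchedEvent event) {
    // Same event the log shows: NodeDeleted on /hbase/running signals cluster shutdown.
    if (event.getType() == Event.EventType.NodeDeleted
        && "/hbase/running".equals(event.getPath())) {
      System.out.println("Cluster shutdown requested (running znode deleted)");
    }
  }

  public static void main(String[] args) throws Exception {
    // Quorum taken from the log; 30s session timeout is an arbitrary choice.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56015", 30_000, new RunningZnodeWatcher());
    // Re-arm the watch even if the znode does not exist yet, as ZKUtil does
    // ("Set watcher on znode that does not yet exist, /hbase/running").
    zk.exists("/hbase/running", true);
    Thread.sleep(60_000);
    zk.close();
  }
}
```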
2024-11-24T03:50:32,216 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T03:50:32,216 DEBUG [RS:0;71d8d2d6408d:41813 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T03:50:32,216 DEBUG [RS:0;71d8d2d6408d:41813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T03:50:32,216 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T03:50:32,216 DEBUG [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T03:50:32,217 DEBUG [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T03:50:32,217 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T03:50:32,217 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T03:50:32,217 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T03:50:32,217 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T03:50:32,217 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T03:50:32,217 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T03:50:32,231 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740/.tmp/ns/a69dd9932a4b4c24a1f8dff3884400ba is 43, key is default/ns:d/1732420232104/Put/seqid=0 2024-11-24T03:50:32,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741837_1013 (size=5153) 2024-11-24T03:50:32,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741837_1013 (size=5153) 2024-11-24T03:50:32,236 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740/.tmp/ns/a69dd9932a4b4c24a1f8dff3884400ba 2024-11-24T03:50:32,242 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740/.tmp/ns/a69dd9932a4b4c24a1f8dff3884400ba as hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740/ns/a69dd9932a4b4c24a1f8dff3884400ba 2024-11-24T03:50:32,246 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740/ns/a69dd9932a4b4c24a1f8dff3884400ba, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T03:50:32,248 INFO 
[RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false 2024-11-24T03:50:32,248 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T03:50:32,252 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T03:50:32,253 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T03:50:32,253 INFO [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T03:50:32,253 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732420232217Running coprocessor pre-close hooks at 1732420232217Disabling compacts and flushes for region at 1732420232217Disabling writes for close at 1732420232217Obtaining lock to block concurrent updates at 1732420232217Preparing flush snapshotting stores in 1588230740 at 1732420232217Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732420232217Flushing stores of hbase:meta,,1.1588230740 at 1732420232218 (+1 ms)Flushing 1588230740/ns: creating writer at 1732420232218Flushing 1588230740/ns: appending metadata at 1732420232231 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1732420232231Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@528879d6: reopening flushed file at 1732420232241 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false at 1732420232248 (+7 ms)Writing region close event to WAL at 1732420232249 (+1 ms)Running coprocessor post-close hooks at 1732420232253 (+4 ms)Closed at 1732420232253 2024-11-24T03:50:32,253 DEBUG [RS_CLOSE_META-regionserver/71d8d2d6408d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T03:50:32,369 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T03:50:32,369 INFO [regionserver/71d8d2d6408d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T03:50:32,417 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(976): stopping server 71d8d2d6408d,41813,1732420230926; all regions closed. 
2024-11-24T03:50:32,417 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,417 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,418 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,418 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,418 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741834_1010 (size=1152)
2024-11-24T03:50:32,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741834_1010 (size=1152)
2024-11-24T03:50:32,425 DEBUG [RS:0;71d8d2d6408d:41813 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/oldWALs
2024-11-24T03:50:32,425 INFO [RS:0;71d8d2d6408d:41813 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C41813%2C1732420230926.meta:.meta(num 1732420232030)
2024-11-24T03:50:32,426 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,426 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,426 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,426 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,427 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-24T03:50:32,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741832_1008 (size=93)
2024-11-24T03:50:32,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741832_1008 (size=93)
2024-11-24T03:50:32,431 DEBUG [RS:0;71d8d2d6408d:41813 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/oldWALs
2024-11-24T03:50:32,431 INFO [RS:0;71d8d2d6408d:41813 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 71d8d2d6408d%2C41813%2C1732420230926:(num 1732420231427)
2024-11-24T03:50:32,431 DEBUG [RS:0;71d8d2d6408d:41813 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-24T03:50:32,431 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.LeaseManager(133): Closed leases
2024-11-24T03:50:32,431 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-24T03:50:32,431 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.ChoreService(370): Chore service for: regionserver/71d8d2d6408d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-24T03:50:32,431 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-24T03:50:32,431 INFO [regionserver/71d8d2d6408d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-24T03:50:32,431 INFO [RS:0;71d8d2d6408d:41813 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41813 2024-11-24T03:50:32,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/71d8d2d6408d,41813,1732420230926 2024-11-24T03:50:32,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T03:50:32,445 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:50:32,457 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [71d8d2d6408d,41813,1732420230926] 2024-11-24T03:50:32,465 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/71d8d2d6408d,41813,1732420230926 already deleted, retry=false 2024-11-24T03:50:32,465 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 71d8d2d6408d,41813,1732420230926 expired; onlineServers=0 2024-11-24T03:50:32,465 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '71d8d2d6408d,45295,1732420230805' ***** 2024-11-24T03:50:32,465 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T03:50:32,465 INFO [M:0;71d8d2d6408d:45295 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T03:50:32,465 INFO [M:0;71d8d2d6408d:45295 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T03:50:32,465 DEBUG [M:0;71d8d2d6408d:45295 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T03:50:32,465 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T03:50:32,465 DEBUG [M:0;71d8d2d6408d:45295 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T03:50:32,465 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420231227 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.large.0-1732420231227,5,FailOnTimeoutGroup] 2024-11-24T03:50:32,465 DEBUG [master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420231228 {}] cleaner.HFileCleaner(306): Exit Thread[master/71d8d2d6408d:0:becomeActiveMaster-HFileCleaner.small.0-1732420231228,5,FailOnTimeoutGroup] 2024-11-24T03:50:32,466 INFO [M:0;71d8d2d6408d:45295 {}] hbase.ChoreService(370): Chore service for: master/71d8d2d6408d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T03:50:32,466 INFO [M:0;71d8d2d6408d:45295 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T03:50:32,466 DEBUG [M:0;71d8d2d6408d:45295 {}] master.HMaster(1795): Stopping service threads 2024-11-24T03:50:32,466 INFO [M:0;71d8d2d6408d:45295 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T03:50:32,466 INFO [M:0;71d8d2d6408d:45295 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T03:50:32,467 INFO [M:0;71d8d2d6408d:45295 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T03:50:32,467 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T03:50:32,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T03:50:32,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T03:50:32,474 DEBUG [M:0;71d8d2d6408d:45295 {}] zookeeper.ZKUtil(347): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T03:50:32,474 WARN [M:0;71d8d2d6408d:45295 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T03:50:32,475 INFO [M:0;71d8d2d6408d:45295 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/.lastflushedseqids 2024-11-24T03:50:32,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741838_1014 (size=99) 2024-11-24T03:50:32,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741838_1014 (size=99) 2024-11-24T03:50:32,485 INFO [M:0;71d8d2d6408d:45295 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T03:50:32,485 INFO [M:0;71d8d2d6408d:45295 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T03:50:32,485 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T03:50:32,485 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:50:32,485 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:50:32,485 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T03:50:32,485 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T03:50:32,485 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T03:50:32,505 DEBUG [M:0;71d8d2d6408d:45295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/88b17cb66fb94c5086831abe44044219 is 82, key is hbase:meta,,1/info:regioninfo/1732420232059/Put/seqid=0 2024-11-24T03:50:32,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741839_1015 (size=5672) 2024-11-24T03:50:32,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741839_1015 (size=5672) 2024-11-24T03:50:32,510 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/88b17cb66fb94c5086831abe44044219 2024-11-24T03:50:32,529 DEBUG [M:0;71d8d2d6408d:45295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/16873d07c4b949ea8bb01b86e72af332 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732420232108/Put/seqid=0 2024-11-24T03:50:32,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741840_1016 (size=5275) 2024-11-24T03:50:32,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741840_1016 (size=5275) 2024-11-24T03:50:32,534 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/16873d07c4b949ea8bb01b86e72af332 2024-11-24T03:50:32,556 DEBUG [M:0;71d8d2d6408d:45295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d33f689eb7bb48f9be3cf7073bca3ed0 is 69, key is 71d8d2d6408d,41813,1732420230926/rs:state/1732420231280/Put/seqid=0 2024-11-24T03:50:32,557 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:50:32,557 INFO [RS:0;71d8d2d6408d:41813 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:50:32,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41813-0x1016c40714b0001, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:50:32,557 INFO [RS:0;71d8d2d6408d:41813 {}] regionserver.HRegionServer(1031): Exiting; stopping=71d8d2d6408d,41813,1732420230926; zookeeper connection closed. 2024-11-24T03:50:32,557 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@182ee6eb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@182ee6eb 2024-11-24T03:50:32,557 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T03:50:32,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741841_1017 (size=5156) 2024-11-24T03:50:32,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741841_1017 (size=5156) 2024-11-24T03:50:32,563 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d33f689eb7bb48f9be3cf7073bca3ed0 2024-11-24T03:50:32,581 DEBUG [M:0;71d8d2d6408d:45295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5736226a3bc3401c9750d85154163d68 is 52, key is load_balancer_on/state:d/1732420232157/Put/seqid=0 2024-11-24T03:50:32,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741842_1018 (size=5056) 2024-11-24T03:50:32,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741842_1018 (size=5056) 2024-11-24T03:50:32,585 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5736226a3bc3401c9750d85154163d68 2024-11-24T03:50:32,590 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/88b17cb66fb94c5086831abe44044219 as hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/88b17cb66fb94c5086831abe44044219 2024-11-24T03:50:32,595 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/88b17cb66fb94c5086831abe44044219, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T03:50:32,596 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/16873d07c4b949ea8bb01b86e72af332 as hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/16873d07c4b949ea8bb01b86e72af332 2024-11-24T03:50:32,601 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/16873d07c4b949ea8bb01b86e72af332, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T03:50:32,601 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d33f689eb7bb48f9be3cf7073bca3ed0 as hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d33f689eb7bb48f9be3cf7073bca3ed0 2024-11-24T03:50:32,606 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d33f689eb7bb48f9be3cf7073bca3ed0, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T03:50:32,607 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5736226a3bc3401c9750d85154163d68 as hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5736226a3bc3401c9750d85154163d68 2024-11-24T03:50:32,611 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41511/user/jenkins/test-data/7bff1fbb-4bc6-1aeb-0cf0-37326be55c82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5736226a3bc3401c9750d85154163d68, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T03:50:32,613 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=29, compaction requested=false 2024-11-24T03:50:32,614 INFO [M:0;71d8d2d6408d:45295 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T03:50:32,614 DEBUG [M:0;71d8d2d6408d:45295 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732420232485Disabling compacts and flushes for region at 1732420232485Disabling writes for close at 1732420232485Obtaining lock to block concurrent updates at 1732420232485Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732420232485Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732420232486 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732420232487 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732420232487Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732420232505 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732420232505Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732420232515 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732420232529 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732420232529Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732420232538 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732420232555 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732420232555Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732420232567 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732420232580 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732420232580Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70edee84: reopening flushed file at 1732420232589 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@734e1017: reopening flushed file at 1732420232595 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fc676d8: reopening flushed file at 1732420232601 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d07fbae: reopening flushed file at 1732420232606 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=29, compaction requested=false at 1732420232613 (+7 ms)Writing region close event to WAL at 1732420232614 (+1 ms)Closed at 1732420232614 2024-11-24T03:50:32,615 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,615 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,615 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,615 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,615 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T03:50:32,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741830_1006 (size=10311) 2024-11-24T03:50:32,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33767 is added to blk_1073741830_1006 (size=10311) 2024-11-24T03:50:32,617 INFO [M:0;71d8d2d6408d:45295 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T03:50:32,617 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T03:50:32,617 INFO [M:0;71d8d2d6408d:45295 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45295 2024-11-24T03:50:32,617 INFO [M:0;71d8d2d6408d:45295 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T03:50:32,746 INFO [M:0;71d8d2d6408d:45295 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T03:50:32,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:50:32,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45295-0x1016c40714b0000, quorum=127.0.0.1:56015, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T03:50:32,748 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a40f8b4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:50:32,749 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@185b315d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:50:32,749 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:50:32,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c79f83b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:50:32,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d1d035a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/hadoop.log.dir/,STOPPED} 2024-11-24T03:50:32,759 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:50:32,759 WARN [BP-1214609500-172.17.0.2-1732420229139 heartbeating to localhost/127.0.0.1:41511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:50:32,759 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:50:32,759 WARN [BP-1214609500-172.17.0.2-1732420229139 heartbeating to localhost/127.0.0.1:41511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1214609500-172.17.0.2-1732420229139 (Datanode Uuid 201454fc-c10e-4d7d-812c-23183a8fff44) service to localhost/127.0.0.1:41511 2024-11-24T03:50:32,759 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/data/data3/current/BP-1214609500-172.17.0.2-1732420229139 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:50:32,760 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/data/data4/current/BP-1214609500-172.17.0.2-1732420229139 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:50:32,760 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:50:32,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3353f5c1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T03:50:32,770 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75fc85f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:50:32,770 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:50:32,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@465e8484{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:50:32,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24e08866{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/hadoop.log.dir/,STOPPED} 2024-11-24T03:50:32,772 WARN [BP-1214609500-172.17.0.2-1732420229139 heartbeating to localhost/127.0.0.1:41511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T03:50:32,772 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T03:50:32,772 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T03:50:32,772 WARN [BP-1214609500-172.17.0.2-1732420229139 heartbeating to localhost/127.0.0.1:41511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1214609500-172.17.0.2-1732420229139 (Datanode Uuid 22fbc400-1564-4a5b-8e21-d80388bc1aff) service to localhost/127.0.0.1:41511 2024-11-24T03:50:32,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/data/data1/current/BP-1214609500-172.17.0.2-1732420229139 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:50:32,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/cluster_3dce4eda-8f3d-9ec4-f90a-813d4d6e2810/data/data2/current/BP-1214609500-172.17.0.2-1732420229139 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T03:50:32,773 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T03:50:32,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f697625{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T03:50:32,778 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2054df1e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T03:50:32,778 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T03:50:32,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f9ff378{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T03:50:32,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@416a4238{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c366f090-a314-cf66-c22a-3124423846bd/hadoop.log.dir/,STOPPED} 2024-11-24T03:50:32,783 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T03:50:32,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T03:50:32,807 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 230) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:41511 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:41511 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41511 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:41511 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:41511 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41511 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=251 (was 239) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6346 (was 6354)