2024-11-07 12:50:35,473 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-07 12:50:35,488 main DEBUG Took 0.013084 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-07 12:50:35,489 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-07 12:50:35,489 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-07 12:50:35,490 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-07 12:50:35,491 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,499 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-07 12:50:35,512 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,514 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,515 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,515 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,516 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,516 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,517 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,518 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,518 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,519 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,520 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,520 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,521 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,521 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-07 12:50:35,522 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,523 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,523 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,524 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,524 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,525 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,525 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,526 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,526 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,527 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 12:50:35,527 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,528 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-07 12:50:35,530 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 12:50:35,531 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-07 12:50:35,533 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-07 12:50:35,534 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-07 12:50:35,535 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-07 12:50:35,536 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-07 12:50:35,545 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-07 12:50:35,548 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-07 12:50:35,549 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-07 12:50:35,550 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-07 12:50:35,550 main DEBUG createAppenders(={Console}) 2024-11-07 12:50:35,551 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-07 12:50:35,551 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-07 12:50:35,551 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-07 12:50:35,552 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-07 12:50:35,552 main DEBUG OutputStream closed 2024-11-07 12:50:35,552 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-07 12:50:35,552 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-07 12:50:35,553 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-07 12:50:35,617 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-07 12:50:35,619 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-07 12:50:35,620 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-07 12:50:35,621 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-07 12:50:35,622 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-07 12:50:35,622 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-07 12:50:35,622 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-07 12:50:35,622 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-07 12:50:35,623 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-07 12:50:35,623 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-07 12:50:35,623 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-07 12:50:35,623 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-07 12:50:35,624 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-07 12:50:35,624 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-07 12:50:35,624 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-07 12:50:35,625 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-07 12:50:35,625 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-07 12:50:35,626 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-07 12:50:35,628 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-07 12:50:35,628 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-07 12:50:35,629 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-07 12:50:35,629 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-07T12:50:35,865 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48 2024-11-07 12:50:35,868 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-07 12:50:35,869 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-07T12:50:35,878 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-07T12:50:35,912 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=263, ProcessCount=11, AvailableMemoryMB=9476 2024-11-07T12:50:35,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:50:35,934 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc, deleteOnExit=true 2024-11-07T12:50:35,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:50:35,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/test.cache.data in system properties and HBase conf 2024-11-07T12:50:35,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:50:35,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:50:35,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:50:35,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:50:35,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:50:36,031 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-07T12:50:36,124 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-07T12:50:36,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:50:36,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:50:36,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:50:36,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:50:36,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:50:36,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:50:36,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:50:36,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:50:36,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:50:36,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:50:36,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:50:36,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:50:36,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:50:36,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:50:36,640 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:50:36,971 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-07T12:50:37,056 INFO [Time-limited test {}] log.Log(170): Logging initialized @2324ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-07T12:50:37,138 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:50:37,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:50:37,220 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:50:37,220 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:50:37,221 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:50:37,234 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:50:37,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:50:37,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:50:37,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/java.io.tmpdir/jetty-localhost-40309-hadoop-hdfs-3_4_1-tests_jar-_-any-9923899732925509430/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:50:37,440 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:40309} 2024-11-07T12:50:37,441 INFO [Time-limited test {}] server.Server(415): Started @2710ms 2024-11-07T12:50:37,475 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:50:37,834 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:50:37,843 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:50:37,845 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:50:37,845 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:50:37,846 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:50:37,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:50:37,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:50:37,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/java.io.tmpdir/jetty-localhost-39485-hadoop-hdfs-3_4_1-tests_jar-_-any-9147597242560801322/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:50:37,968 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:39485} 2024-11-07T12:50:37,968 INFO [Time-limited test {}] server.Server(415): Started @3237ms 2024-11-07T12:50:38,026 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:50:38,152 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:50:38,159 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:50:38,160 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:50:38,161 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:50:38,161 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:50:38,162 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:50:38,162 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:50:38,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/java.io.tmpdir/jetty-localhost-42877-hadoop-hdfs-3_4_1-tests_jar-_-any-16520900789165554239/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:50:38,301 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:42877} 2024-11-07T12:50:38,301 INFO [Time-limited test {}] server.Server(415): Started @3570ms 2024-11-07T12:50:38,304 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-07T12:50:38,451 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/data/data4/current/BP-929681960-172.17.0.2-1730983836734/current, will proceed with Du for space computation calculation, 2024-11-07T12:50:38,451 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/data/data3/current/BP-929681960-172.17.0.2-1730983836734/current, will proceed with Du for space computation calculation, 2024-11-07T12:50:38,451 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/data/data2/current/BP-929681960-172.17.0.2-1730983836734/current, will proceed with Du for space computation calculation, 2024-11-07T12:50:38,451 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/data/data1/current/BP-929681960-172.17.0.2-1730983836734/current, will proceed with Du for space computation calculation, 2024-11-07T12:50:38,496 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:50:38,496 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:50:38,574 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14c604ed36ecac9a with lease ID 0x340e93e49e56989: Processing first storage report for DS-5250b43d-a6e2-4e1f-a284-f227144507ec from datanode DatanodeRegistration(127.0.0.1:38133, datanodeUuid=37a16055-f5d5-4efc-a01e-5deeb55ed03a, infoPort=40741, infoSecurePort=0, ipcPort=37341, storageInfo=lv=-57;cid=testClusterID;nsid=1159085042;c=1730983836734) 2024-11-07T12:50:38,576 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14c604ed36ecac9a with lease ID 0x340e93e49e56989: from storage DS-5250b43d-a6e2-4e1f-a284-f227144507ec node DatanodeRegistration(127.0.0.1:38133, datanodeUuid=37a16055-f5d5-4efc-a01e-5deeb55ed03a, infoPort=40741, infoSecurePort=0, ipcPort=37341, storageInfo=lv=-57;cid=testClusterID;nsid=1159085042;c=1730983836734), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:50:38,576 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53eb25debeb65791 with lease ID 0x340e93e49e56988: Processing first storage report for DS-fb28e249-71c7-444a-af9a-6f340750efc5 from datanode DatanodeRegistration(127.0.0.1:45909, datanodeUuid=18aca364-8f18-4fb2-a6df-b6b7da64cc15, infoPort=46833, infoSecurePort=0, ipcPort=43255, storageInfo=lv=-57;cid=testClusterID;nsid=1159085042;c=1730983836734) 2024-11-07T12:50:38,576 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53eb25debeb65791 with lease ID 0x340e93e49e56988: from storage DS-fb28e249-71c7-444a-af9a-6f340750efc5 node DatanodeRegistration(127.0.0.1:45909, datanodeUuid=18aca364-8f18-4fb2-a6df-b6b7da64cc15, infoPort=46833, infoSecurePort=0, ipcPort=43255, storageInfo=lv=-57;cid=testClusterID;nsid=1159085042;c=1730983836734), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:50:38,576 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14c604ed36ecac9a with lease ID 0x340e93e49e56989: Processing first storage report for DS-0244773b-0de3-4fd9-a96d-ec9ea8c8e40a from datanode DatanodeRegistration(127.0.0.1:38133, datanodeUuid=37a16055-f5d5-4efc-a01e-5deeb55ed03a, infoPort=40741, infoSecurePort=0, ipcPort=37341, storageInfo=lv=-57;cid=testClusterID;nsid=1159085042;c=1730983836734) 2024-11-07T12:50:38,577 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14c604ed36ecac9a with lease ID 0x340e93e49e56989: from storage DS-0244773b-0de3-4fd9-a96d-ec9ea8c8e40a node DatanodeRegistration(127.0.0.1:38133, datanodeUuid=37a16055-f5d5-4efc-a01e-5deeb55ed03a, infoPort=40741, infoSecurePort=0, ipcPort=37341, storageInfo=lv=-57;cid=testClusterID;nsid=1159085042;c=1730983836734), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:50:38,577 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53eb25debeb65791 with lease ID 0x340e93e49e56988: Processing first storage report for DS-3d1ee88e-cd32-418c-a298-3f1a29aecd14 from datanode DatanodeRegistration(127.0.0.1:45909, datanodeUuid=18aca364-8f18-4fb2-a6df-b6b7da64cc15, infoPort=46833, infoSecurePort=0, ipcPort=43255, storageInfo=lv=-57;cid=testClusterID;nsid=1159085042;c=1730983836734) 2024-11-07T12:50:38,577 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x53eb25debeb65791 with lease ID 0x340e93e49e56988: from storage DS-3d1ee88e-cd32-418c-a298-3f1a29aecd14 node DatanodeRegistration(127.0.0.1:45909, datanodeUuid=18aca364-8f18-4fb2-a6df-b6b7da64cc15, infoPort=46833, infoSecurePort=0, ipcPort=43255, storageInfo=lv=-57;cid=testClusterID;nsid=1159085042;c=1730983836734), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:50:38,687 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48 2024-11-07T12:50:38,763 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/zookeeper_0, clientPort=56842, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:50:38,775 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56842 2024-11-07T12:50:38,786 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:50:38,789 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:50:39,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:50:39,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:50:39,449 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979 with version=8 2024-11-07T12:50:39,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/hbase-staging 2024-11-07T12:50:39,548 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-07T12:50:39,792 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:50:39,803 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:50:39,804 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:50:39,808 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:50:39,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:50:39,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:50:39,943 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:50:40,003 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-07T12:50:40,012 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-07T12:50:40,016 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:50:40,043 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 23201 (auto-detected) 2024-11-07T12:50:40,044 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-07T12:50:40,063 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33149 2024-11-07T12:50:40,084 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33149 connecting to ZooKeeper ensemble=127.0.0.1:56842 2024-11-07T12:50:40,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331490x0, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:50:40,125 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33149-0x1001a4b4eb80000 connected 2024-11-07T12:50:40,147 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:50:40,149 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:50:40,159 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:50:40,163 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979, hbase.cluster.distributed=false 2024-11-07T12:50:40,186 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:50:40,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33149 
2024-11-07T12:50:40,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33149 2024-11-07T12:50:40,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33149 2024-11-07T12:50:40,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33149 2024-11-07T12:50:40,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33149 2024-11-07T12:50:40,324 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:50:40,326 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:50:40,326 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:50:40,327 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:50:40,327 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:50:40,327 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:50:40,330 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:50:40,333 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:50:40,334 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43943 2024-11-07T12:50:40,337 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43943 connecting to ZooKeeper ensemble=127.0.0.1:56842 2024-11-07T12:50:40,338 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:50:40,344 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:50:40,354 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:439430x0, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:50:40,355 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43943-0x1001a4b4eb80001 connected 2024-11-07T12:50:40,355 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-11-07T12:50:40,359 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:50:40,367 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:50:40,370 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:50:40,375 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:50:40,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43943 2024-11-07T12:50:40,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43943 2024-11-07T12:50:40,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43943 2024-11-07T12:50:40,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43943 2024-11-07T12:50:40,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43943 2024-11-07T12:50:40,393 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db9ad1cb6cf9:33149 2024-11-07T12:50:40,394 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:50:40,401 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:50:40,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:50:40,403 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:50:40,425 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:50:40,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:40,425 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:40,426 DEBUG 
[master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:50:40,427 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db9ad1cb6cf9,33149,1730983839599 from backup master directory 2024-11-07T12:50:40,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:50:40,431 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:50:40,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:50:40,431 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:50:40,432 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:50:40,434 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-07T12:50:40,436 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-07T12:50:40,495 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/hbase.id] with ID: 5283191b-cb67-420f-aa8d-fe402e0f145d 2024-11-07T12:50:40,495 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/.tmp/hbase.id 2024-11-07T12:50:40,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:50:40,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:50:40,508 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/.tmp/hbase.id]:[hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/hbase.id] 2024-11-07T12:50:40,551 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:50:40,555 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-07T12:50:40,574 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-07T12:50:40,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:40,578 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:40,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:50:40,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:50:40,609 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:50:40,611 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:50:40,617 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:50:40,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:50:40,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:50:40,669 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store 2024-11-07T12:50:40,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:50:40,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:50:40,694 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-07T12:50:40,697 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:50:40,698 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:50:40,698 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:50:40,698 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:50:40,699 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:50:40,700 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:50:40,700 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:50:40,701 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730983840698Disabling compacts and flushes for region at 1730983840698Disabling writes for close at 1730983840699 (+1 ms)Writing region close event to WAL at 1730983840700 (+1 ms)Closed at 1730983840700 2024-11-07T12:50:40,702 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/.initializing 2024-11-07T12:50:40,703 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/WALs/db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:50:40,723 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C33149%2C1730983839599, suffix=, logDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/WALs/db9ad1cb6cf9,33149,1730983839599, archiveDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/oldWALs, maxLogs=10 2024-11-07T12:50:40,732 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C33149%2C1730983839599.1730983840728 2024-11-07T12:50:40,750 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/WALs/db9ad1cb6cf9,33149,1730983839599/db9ad1cb6cf9%2C33149%2C1730983839599.1730983840728 2024-11-07T12:50:40,759 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:46833:46833)] 2024-11-07T12:50:40,760 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:50:40,761 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:50:40,764 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,766 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:50:40,835 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:40,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:50:40,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,843 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:50:40,843 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:40,844 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:50:40,844 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:50:40,847 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:40,848 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:50:40,848 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,851 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:50:40,851 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:40,852 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:50:40,853 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,856 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,857 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,862 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,862 DEBUG 
[master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,865 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:50:40,869 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:50:40,873 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:50:40,875 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855449, jitterRate=0.08775986731052399}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:50:40,883 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730983840778Initializing all the Stores at 1730983840780 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983840781 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983840781Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983840782 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983840782Cleaning up temporary data from old regions at 1730983840862 (+80 ms)Region opened successfully at 1730983840883 (+21 ms) 2024-11-07T12:50:40,884 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:50:40,922 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50df4e76, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:50:40,955 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:50:40,967 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:50:40,967 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:50:40,971 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:50:40,972 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-07T12:50:40,977 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-07T12:50:40,977 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:50:41,003 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:50:41,016 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:50:41,018 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:50:41,021 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:50:41,023 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:50:41,025 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:50:41,027 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:50:41,030 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:50:41,033 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:50:41,034 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-07T12:50:41,036 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:50:41,052 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:50:41,054 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:50:41,061 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:50:41,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:50:41,061 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:41,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:41,064 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db9ad1cb6cf9,33149,1730983839599, sessionid=0x1001a4b4eb80000, setting cluster-up flag (Was=false) 2024-11-07T12:50:41,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:41,077 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:41,083 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:50:41,084 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:50:41,090 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:41,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:41,096 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:50:41,097 DEBUG 
[master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:50:41,103 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:50:41,175 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:50:41,181 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(746): ClusterId : 5283191b-cb67-420f-aa8d-fe402e0f145d 2024-11-07T12:50:41,184 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:50:41,185 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:50:41,190 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:50:41,190 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:50:41,191 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
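The flushSizeLowerBound=33554432 reported for master:store a few entries above follows directly from the message "using region.getMemStoreFlushHeapSize/# of families (32.0 M)": the region's flush size of 134217728 bytes (128 MB, the "Injected flushSize" logged earlier) divided by its four column families (info, proc, rs, state). The same division explains the 16.0 M figure logged later for hbase:meta and its four families. A minimal arithmetic check (plain Java, names are illustrative):

public class FlushLowerBoundCheck {
  public static void main(String[] args) {
    long memStoreFlushSize = 134_217_728L; // 128 MB, "Injected flushSize" for master:store
    int families = 4;                      // info, proc, rs, state
    System.out.println(memStoreFlushSize / families); // 33554432 (32 MB), matching flushSizeLowerBound
  }
}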
2024-11-07T12:50:41,194 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:50:41,195 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25fc9d8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:50:41,197 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db9ad1cb6cf9,33149,1730983839599 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:50:41,204 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:50:41,204 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:50:41,204 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:50:41,204 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:50:41,204 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db9ad1cb6cf9:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:50:41,204 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,205 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:50:41,205 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,208 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730983871207 2024-11-07T12:50:41,209 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:50:41,210 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:50:41,210 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:50:41,211 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 
2024-11-07T12:50:41,214 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:50:41,215 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:50:41,215 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:50:41,215 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:50:41,216 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db9ad1cb6cf9:43943 2024-11-07T12:50:41,218 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:41,218 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:50:41,220 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:50:41,221 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:50:41,221 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-07T12:50:41,219 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
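In the hbase:meta descriptor above, the attribute coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|' is HBase's pipe-separated coprocessor spec (jar path, class name, priority, optional arguments); the empty first field means the class is loaded from the server classpath, and 536870911 corresponds to Coprocessor.PRIORITY_SYSTEM. A hedged sketch of declaring the same coprocessor through the client API (illustrative, not taken from this test):

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class MetaCoprocessorSpecSketch {
  // Sketch only: the logged '|MultiRowMutationEndpoint|536870911|' spec expressed via the client API.
  static TableDescriptor sketch() {
    return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
        .setCoprocessor(CoprocessorDescriptorBuilder
            .newBuilder("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setPriority(Coprocessor.PRIORITY_SYSTEM) // 536870911, the priority field in the logged spec
            .build())
        .build();
  }
}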
2024-11-07T12:50:41,224 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(2659): reportForDuty to master=db9ad1cb6cf9,33149,1730983839599 with port=43943, startcode=1730983840278 2024-11-07T12:50:41,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:50:41,228 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:50:41,228 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:50:41,232 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:50:41,232 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:50:41,236 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983841233,5,FailOnTimeoutGroup] 2024-11-07T12:50:41,239 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:50:41,242 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983841237,5,FailOnTimeoutGroup] 2024-11-07T12:50:41,242 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,243 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:50:41,244 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,245 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-07T12:50:41,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:50:41,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:50:41,248 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:50:41,249 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979 2024-11-07T12:50:41,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:50:41,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:50:41,283 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:50:41,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:50:41,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:50:41,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:41,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:50:41,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:50:41,296 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:50:41,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:41,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:50:41,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:50:41,300 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:50:41,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:41,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:50:41,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:50:41,307 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:50:41,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:41,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:50:41,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:50:41,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740 2024-11-07T12:50:41,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740 2024-11-07T12:50:41,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:50:41,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:50:41,316 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-07T12:50:41,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:50:41,324 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:50:41,325 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787956, jitterRate=0.0019381344318389893}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:50:41,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730983841283Initializing all the Stores at 1730983841286 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983841286Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983841288 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983841288Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983841288Cleaning up temporary data from old regions at 1730983841315 (+27 ms)Region opened successfully at 1730983841328 (+13 ms) 2024-11-07T12:50:41,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:50:41,329 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:50:41,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:50:41,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:50:41,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:50:41,330 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60359, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:50:41,333 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:50:41,333 
DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730983841328Disabling compacts and flushes for region at 1730983841328Disabling writes for close at 1730983841329 (+1 ms)Writing region close event to WAL at 1730983841333 (+4 ms)Closed at 1730983841333 2024-11-07T12:50:41,337 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33149 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:41,337 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:50:41,337 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:50:41,340 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33149 {}] master.ServerManager(517): Registering regionserver=db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:41,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:50:41,352 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:50:41,355 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:50:41,355 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979 2024-11-07T12:50:41,355 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34313 2024-11-07T12:50:41,355 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:50:41,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:50:41,361 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] zookeeper.ZKUtil(111): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:41,361 WARN [RS:0;db9ad1cb6cf9:43943 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
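The two "Opened ..." entries in this section report ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855449, jitterRate=0.08775986731052399} for master:store and {desiredMaxFileSize=787956, jitterRate=0.0019381344318389893} for hbase:meta. Both sizes are consistent with a configured base max file size of 786432 bytes (768 KB) with a random jitter applied, roughly desiredMaxFileSize = base + base * jitterRate. A quick check (the base value is inferred from the numbers in this log, not read from the test configuration):

public class SplitJitterCheck {
  public static void main(String[] args) {
    long base = 786_432L; // 768 KB, inferred from the logged sizes
    System.out.println(base + (long) (base * 0.08775986731052399));  // 855449, matching the master:store line
    System.out.println(base + (long) (base * 0.0019381344318389893)); // 787956, matching the hbase:meta line
  }
}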
2024-11-07T12:50:41,361 INFO [RS:0;db9ad1cb6cf9:43943 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:50:41,362 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:41,364 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9ad1cb6cf9,43943,1730983840278] 2024-11-07T12:50:41,390 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:50:41,402 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:50:41,407 INFO [RS:0;db9ad1cb6cf9:43943 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:50:41,407 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,408 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:50:41,414 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:50:41,416 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,416 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,416 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,416 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,416 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,417 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,417 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:50:41,417 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,417 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,417 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:50:41,417 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,418 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,418 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:50:41,418 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:50:41,418 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:50:41,419 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,419 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,419 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,419 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,420 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,420 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43943,1730983840278-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:50:41,438 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:50:41,440 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43943,1730983840278-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,440 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:41,440 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.Replication(171): db9ad1cb6cf9,43943,1730983840278 started 2024-11-07T12:50:41,461 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
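The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M; the low-water mark is the global limit scaled by the lower-limit fraction, which defaults to 0.95 (880 M x 0.95 = 836 M). A trivial check, assuming the default hbase.regionserver.global.memstore.size.lower.limit:

public class MemStoreLowMarkCheck {
  public static void main(String[] args) {
    double globalLimitMb = 880.0;     // globalMemStoreLimit from the log
    double lowerLimitFraction = 0.95; // assumed default lower-limit fraction
    System.out.println(globalLimitMb * lowerLimitFraction); // 836.0, matching globalMemStoreLimitLowMark
  }
}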
2024-11-07T12:50:41,461 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(1482): Serving as db9ad1cb6cf9,43943,1730983840278, RpcServer on db9ad1cb6cf9/172.17.0.2:43943, sessionid=0x1001a4b4eb80001 2024-11-07T12:50:41,462 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:50:41,462 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:41,462 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,43943,1730983840278' 2024-11-07T12:50:41,463 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:50:41,464 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:50:41,465 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:50:41,465 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:50:41,465 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:41,465 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,43943,1730983840278' 2024-11-07T12:50:41,465 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:50:41,466 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:50:41,467 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:50:41,467 INFO [RS:0;db9ad1cb6cf9:43943 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:50:41,467 INFO [RS:0;db9ad1cb6cf9:43943 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:50:41,506 WARN [db9ad1cb6cf9:33149 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-07T12:50:41,575 INFO [RS:0;db9ad1cb6cf9:43943 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C43943%2C1730983840278, suffix=, logDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278, archiveDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs, maxLogs=32 2024-11-07T12:50:41,578 INFO [RS:0;db9ad1cb6cf9:43943 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.1730983841578 2024-11-07T12:50:41,587 INFO [RS:0;db9ad1cb6cf9:43943 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983841578 2024-11-07T12:50:41,589 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46833:46833),(127.0.0.1/127.0.0.1:40741:40741)] 2024-11-07T12:50:41,758 DEBUG [db9ad1cb6cf9:33149 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:50:41,771 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:41,777 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,43943,1730983840278, state=OPENING 2024-11-07T12:50:41,782 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:50:41,784 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:41,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:50:41,785 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:50:41,785 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:50:41,786 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:50:41,788 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,43943,1730983840278}] 2024-11-07T12:50:41,964 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:50:41,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51607, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:50:41,977 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:50:41,978 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:50:41,982 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C43943%2C1730983840278.meta, suffix=.meta, logDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278, archiveDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs, maxLogs=32 2024-11-07T12:50:41,984 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.meta.1730983841983.meta 2024-11-07T12:50:41,991 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.meta.1730983841983.meta 2024-11-07T12:50:41,994 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:46833:46833)] 2024-11-07T12:50:41,996 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:50:41,997 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:50:42,000 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:50:42,005 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
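Both WAL instances above report the same configuration: blocksize=256 MB, rollsize=128 MB, maxLogs=32, using the FSHLogProvider. As a hedged sketch only, the snippet below shows how a test might set the commonly documented knobs behind those numbers through the client Configuration API; the property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs, hbase.wal.provider) are the usual ones and should be verified against this build.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // WAL block size; the roll size is block size * roll multiplier,
        // so 256 MB * 0.5 matches the 128 MB rollsize reported in the log.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);

        // Upper bound on un-archived WAL files per regionserver (maxLogs=32).
        conf.setInt("hbase.regionserver.maxlogs", 32);

        // The provider instantiated by wal.WALFactory; "filesystem" selects FSHLogProvider.
        conf.set("hbase.wal.provider", "filesystem");

        long blocksize = conf.getLong("hbase.regionserver.hlog.blocksize", 0);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        System.out.println("rollsize = " + (long) (blocksize * multiplier));
      }
    }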
2024-11-07T12:50:42,009 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:50:42,010 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:50:42,010 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:50:42,010 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:50:42,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:50:42,015 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:50:42,015 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:42,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:50:42,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:50:42,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:50:42,018 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:42,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:50:42,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:50:42,020 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:50:42,020 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:42,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:50:42,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:50:42,022 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:50:42,022 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:42,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
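The CompactionConfiguration lines above repeat the same defaults for each column family of hbase:meta (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB, ExploringCompactionPolicy). Purely as a hedged illustration, the sketch below shows the configuration keys that usually feed those values; the key names are the commonly documented ones and are an assumption about this particular build, not something taken from the test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum / maximum number of store files selected for a minor compaction
        // (the minFilesToCompact:3 / maxFilesToCompact:10 values in the log).
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);

        // Files below this size are always compaction candidates (minCompactSize:128 MB).
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);

        // Selection ratios for peak and off-peak hours (ratio 1.2 / off-peak ratio 5.0).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }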
2024-11-07T12:50:42,023 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:50:42,024 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740 2024-11-07T12:50:42,027 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740 2024-11-07T12:50:42,029 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:50:42,029 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:50:42,030 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:50:42,033 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:50:42,034 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=761869, jitterRate=-0.031233519315719604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:50:42,035 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:50:42,036 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730983842011Writing region info on filesystem at 1730983842011Initializing all the Stores at 1730983842012 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983842013 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983842013Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983842013Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983842013Cleaning up temporary data from old regions at 1730983842029 (+16 ms)Running coprocessor post-open hooks at 1730983842035 (+6 ms)Region opened successfully at 1730983842036 (+1 ms) 2024-11-07T12:50:42,043 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730983841954 2024-11-07T12:50:42,055 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:50:42,055 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:50:42,057 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:42,059 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,43943,1730983840278, state=OPEN 2024-11-07T12:50:42,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:50:42,064 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:50:42,064 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:50:42,064 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:50:42,064 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:42,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:50:42,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,43943,1730983840278 in 276 msec 2024-11-07T12:50:42,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:50:42,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 728 msec 2024-11-07T12:50:42,078 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:50:42,079 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:50:42,101 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:50:42,102 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,43943,1730983840278, seqNum=-1] 2024-11-07T12:50:42,125 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:50:42,127 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59417, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:50:42,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0140 sec 2024-11-07T12:50:42,148 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730983842148, completionTime=-1 2024-11-07T12:50:42,150 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:50:42,150 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-07T12:50:42,178 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-07T12:50:42,178 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730983902178 2024-11-07T12:50:42,178 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730983962178 2024-11-07T12:50:42,178 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-11-07T12:50:42,182 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33149,1730983839599-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:42,182 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33149,1730983839599-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:42,183 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33149,1730983839599-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:42,184 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db9ad1cb6cf9:33149, period=300000, unit=MILLISECONDS is enabled. 
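At this point InitMetaProcedure has created the 'default' and 'hbase' namespaces and the master has joined its single regionserver. A client could confirm that through the Admin API with something like the hedged sketch below; the connection setup is an assumption (it relies on an hbase-site.xml for this cluster being on the classpath), only the listNamespaceDescriptors call itself is standard.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Expect at least the two namespaces created by InitMetaProcedure.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }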
2024-11-07T12:50:42,184 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:42,185 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:50:42,191 DEBUG [master/db9ad1cb6cf9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:50:42,212 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.780sec 2024-11-07T12:50:42,213 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:50:42,214 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:50:42,215 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:50:42,216 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:50:42,216 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:50:42,217 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33149,1730983839599-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:50:42,217 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33149,1730983839599-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:50:42,225 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:50:42,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:50:42,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33149,1730983839599-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:50:42,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3653b52f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:50:42,296 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-07T12:50:42,296 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-07T12:50:42,301 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db9ad1cb6cf9,33149,-1 for getting cluster id 2024-11-07T12:50:42,305 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:50:42,314 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5283191b-cb67-420f-aa8d-fe402e0f145d' 2024-11-07T12:50:42,317 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:50:42,317 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5283191b-cb67-420f-aa8d-fe402e0f145d" 2024-11-07T12:50:42,319 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32768bc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:50:42,319 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db9ad1cb6cf9,33149,-1] 2024-11-07T12:50:42,322 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:50:42,324 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:50:42,325 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45184, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:50:42,329 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458c2fcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:50:42,329 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:50:42,336 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,43943,1730983840278, seqNum=-1] 2024-11-07T12:50:42,337 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:50:42,339 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41338, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:50:42,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:50:42,361 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:50:42,369 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:50:42,373 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:50:42,378 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:50:42,382 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@50099097 2024-11-07T12:50:42,383 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:50:42,386 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45194, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:50:42,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:50:42,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
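The two TableDescriptorChecker warnings are expected here: the test deliberately uses a tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes and WAL activity happen quickly. A table descriptor with those values would be built roughly as in the sketch below; the table name, family name, and the two sizes come from the log, while everything else is a plausible reconstruction rather than the test's actual code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSmallTableSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");

        TableDescriptor td = TableDescriptorBuilder.newBuilder(name)
            // Values this small trigger the "too small" warnings above,
            // but they keep the test's flushes and rolls fast.
            .setMaxFileSize(786432L)          // MAX_FILESIZE
            .setMemStoreFlushSize(8192L)      // MEMSTORE_FLUSHSIZE
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)            // VERSIONS => '1' in the descriptor
                .build())
            .build();

        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Corresponds to the CreateTableProcedure started below (pid=4).
          admin.createTable(td);
        }
      }
    }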
2024-11-07T12:50:42,391 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:50:42,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-07T12:50:42,402 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:50:42,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-07T12:50:42,405 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:42,407 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:50:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:50:42,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741835_1011 (size=389) 2024-11-07T12:50:42,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741835_1011 (size=389) 2024-11-07T12:50:42,850 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f360db58326b841fa64f6e8c6cd8d990, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979 2024-11-07T12:50:42,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741836_1012 (size=72) 2024-11-07T12:50:42,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741836_1012 (size=72) 2024-11-07T12:50:42,862 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:50:42,862 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing f360db58326b841fa64f6e8c6cd8d990, disabling compactions & flushes 2024-11-07T12:50:42,862 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:50:42,862 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:50:42,862 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. after waiting 0 ms 2024-11-07T12:50:42,862 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:50:42,862 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:50:42,862 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for f360db58326b841fa64f6e8c6cd8d990: Waiting for close lock at 1730983842862Disabling compacts and flushes for region at 1730983842862Disabling writes for close at 1730983842862Writing region close event to WAL at 1730983842862Closed at 1730983842862 2024-11-07T12:50:42,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:50:42,870 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1730983842865"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730983842865"}]},"ts":"1730983842865"} 2024-11-07T12:50:42,875 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-07T12:50:42,877 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:50:42,880 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730983842877"}]},"ts":"1730983842877"} 2024-11-07T12:50:42,884 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-07T12:50:42,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f360db58326b841fa64f6e8c6cd8d990, ASSIGN}] 2024-11-07T12:50:42,889 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f360db58326b841fa64f6e8c6cd8d990, ASSIGN 2024-11-07T12:50:42,891 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f360db58326b841fa64f6e8c6cd8d990, ASSIGN; state=OFFLINE, location=db9ad1cb6cf9,43943,1730983840278; forceNewPlan=false, retain=false 2024-11-07T12:50:43,042 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f360db58326b841fa64f6e8c6cd8d990, regionState=OPENING, regionLocation=db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:43,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f360db58326b841fa64f6e8c6cd8d990, ASSIGN because future has completed 2024-11-07T12:50:43,048 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f360db58326b841fa64f6e8c6cd8d990, server=db9ad1cb6cf9,43943,1730983840278}] 2024-11-07T12:50:43,209 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 
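Once the ASSIGN subprocedure above completes, the table's single region f360db58326b841fa64f6e8c6cd8d990 is opened on db9ad1cb6cf9,43943. A client can observe the result of that assignment through the Admin API; a minimal sketch, assuming a reachable cluster configuration on the classpath:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Lists the table's regions; here there is exactly one,
          // with empty start and end keys (STARTKEY => '', ENDKEY => '').
          for (RegionInfo region : admin.getRegions(name)) {
            System.out.println(region.getEncodedName() + " ["
                + Bytes.toStringBinary(region.getStartKey()) + ", "
                + Bytes.toStringBinary(region.getEndKey()) + ")");
          }
        }
      }

      // Bytes is org.apache.hadoop.hbase.util.Bytes; imported here inline for brevity.
      private static final class Bytes extends org.apache.hadoop.hbase.util.Bytes {}
    }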
2024-11-07T12:50:43,210 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f360db58326b841fa64f6e8c6cd8d990, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:50:43,210 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,210 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:50:43,211 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,211 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,213 INFO [StoreOpener-f360db58326b841fa64f6e8c6cd8d990-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,216 INFO [StoreOpener-f360db58326b841fa64f6e8c6cd8d990-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f360db58326b841fa64f6e8c6cd8d990 columnFamilyName info 2024-11-07T12:50:43,216 DEBUG [StoreOpener-f360db58326b841fa64f6e8c6cd8d990-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:50:43,217 INFO [StoreOpener-f360db58326b841fa64f6e8c6cd8d990-1 {}] regionserver.HStore(327): Store=f360db58326b841fa64f6e8c6cd8d990/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:50:43,217 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,219 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,220 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,221 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,221 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,224 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,228 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:50:43,229 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f360db58326b841fa64f6e8c6cd8d990; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835499, jitterRate=0.06239283084869385}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:50:43,229 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:50:43,230 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f360db58326b841fa64f6e8c6cd8d990: Running coprocessor pre-open hook at 1730983843211Writing region info on filesystem at 1730983843211Initializing all the Stores at 1730983843213 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983843213Cleaning up temporary data from old regions at 1730983843221 (+8 ms)Running coprocessor post-open hooks at 1730983843229 (+8 ms)Region opened successfully at 1730983843230 (+1 ms) 2024-11-07T12:50:43,233 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990., pid=6, masterSystemTime=1730983843202 2024-11-07T12:50:43,237 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:50:43,237 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:50:43,238 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f360db58326b841fa64f6e8c6cd8d990, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:50:43,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f360db58326b841fa64f6e8c6cd8d990, server=db9ad1cb6cf9,43943,1730983840278 because future has completed 2024-11-07T12:50:43,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:50:43,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f360db58326b841fa64f6e8c6cd8d990, server=db9ad1cb6cf9,43943,1730983840278 in 196 msec 2024-11-07T12:50:43,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:50:43,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f360db58326b841fa64f6e8c6cd8d990, ASSIGN in 362 msec 2024-11-07T12:50:43,254 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:50:43,254 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730983843254"}]},"ts":"1730983843254"} 2024-11-07T12:50:43,258 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-07T12:50:43,260 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:50:43,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 866 msec 2024-11-07T12:50:47,513 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T12:50:47,557 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T12:50:47,558 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-07T12:50:50,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T12:50:50,001 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-07T12:50:50,002 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-07T12:50:50,002 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-07T12:50:50,003 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:50:50,003 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-07T12:50:50,003 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-07T12:50:50,004 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-07T12:50:52,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:50:52,515 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-07T12:50:52,518 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-07T12:50:52,524 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-07T12:50:52,525 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 
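From here the test writes rows into the 'info' family of TestLogRolling-testSlowSyncLogRolling (the later flush reports 7 entries starting at row0001) and then rolls the WAL. A hedged sketch of the client side of such writes follows; the row keys, qualifier, and payload sizes are made up, and seven Puts are used only to mirror the "entries=7" reported by the flush further down.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name)) {
          // The client first locates the region for the row (the
          // AsyncNonMetaRegionLocator line below), then sends the Put to
          // db9ad1cb6cf9,43943. Value content here is arbitrary test filler.
          byte[] family = Bytes.toBytes("info");
          for (int i = 1; i <= 7; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(family, Bytes.toBytes("q"), new byte[1024]);
            table.put(put);
          }
        }
      }
    }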
2024-11-07T12:50:52,526 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.1730983852525 2024-11-07T12:50:52,534 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:50:52,535 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:50:52,535 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:50:52,535 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:50:52,535 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:50:52,536 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983841578 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983852525 2024-11-07T12:50:52,537 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:46833:46833)] 2024-11-07T12:50:52,537 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983841578 is not closed yet, will try archiving it next time 2024-11-07T12:50:52,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741833_1009 (size=451) 2024-11-07T12:50:52,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741833_1009 (size=451) 2024-11-07T12:50:52,541 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983841578 to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs/db9ad1cb6cf9%2C43943%2C1730983840278.1730983841578 2024-11-07T12:50:52,547 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990., hostname=db9ad1cb6cf9,43943,1730983840278, seqNum=2] 2024-11-07T12:51:04,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43943 {}] regionserver.HRegion(8855): Flush requested on f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:51:04,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f360db58326b841fa64f6e8c6cd8d990 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:51:04,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/cd82bf13d0fb458bba76afec204f9770 is 1080, key is row0001/info:/1730983852550/Put/seqid=0 2024-11-07T12:51:04,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741838_1014 (size=12509) 2024-11-07T12:51:04,661 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741838_1014 (size=12509) 2024-11-07T12:51:05,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/cd82bf13d0fb458bba76afec204f9770 2024-11-07T12:51:05,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/cd82bf13d0fb458bba76afec204f9770 as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cd82bf13d0fb458bba76afec204f9770 2024-11-07T12:51:05,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cd82bf13d0fb458bba76afec204f9770, entries=7, sequenceid=11, filesize=12.2 K 2024-11-07T12:51:05,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f360db58326b841fa64f6e8c6cd8d990 in 541ms, sequenceid=11, compaction requested=false 2024-11-07T12:51:05,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f360db58326b841fa64f6e8c6cd8d990: 2024-11-07T12:51:08,684 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
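The flush above shows the two-step commit the RegionServer uses: the 7.36 KB memstore snapshot (7,532 bytes, i.e. 7532/1024 KB) is first written as an HFile under the region's .tmp/ directory and only then committed into info/, so readers never observe a partially written file. The sketch below illustrates that write-to-temp-then-move idea on the local filesystem with java.nio.file; it stands in for the HDFS rename the log shows, and the paths and file names are made up for the example.

import java.io.IOException;
import java.nio.file.*;

// Illustrative sketch of the flush commit pattern in the log: write the new file
// under .tmp/, then move it into the live store directory so it becomes visible
// only once complete. Local-FS stand-in for the HDFS rename; names are made up.
public final class FlushCommitSketch {
    static Path flushAndCommit(Path storeDir, byte[] snapshot, String fileName) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);

        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, snapshot);                       // step 1: write under .tmp/

        Path committed = storeDir.resolve("info").resolve(fileName);
        Files.createDirectories(committed.getParent());
        // step 2: atomic move into the store's info/ directory
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store");
        Path result = flushAndCommit(storeDir, "row0001/info:...".getBytes(), "cd82bf13d0fb458bba76afec204f9770");
        System.out.println("committed " + result + " (" + Files.size(result) + " bytes)");
    }
}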
2024-11-07T12:51:12,598 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.1730983872597 2024-11-07T12:51:12,806 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:12,806 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:12,806 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:12,806 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:12,807 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:12,807 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:12,807 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983852525 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983872597 2024-11-07T12:51:12,808 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:46833:46833)] 2024-11-07T12:51:12,808 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983852525 is not closed yet, will try archiving it next time 2024-11-07T12:51:12,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741837_1013 (size=12399) 2024-11-07T12:51:12,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741837_1013 (size=12399) 2024-11-07T12:51:13,011 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:15,215 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:17,420 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:19,624 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:19,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43943 {}] regionserver.HRegion(8855): Flush requested on f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:51:19,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f360db58326b841fa64f6e8c6cd8d990 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:51:19,826 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:19,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/f3e4bb766dc04a46989fcc333012c32f is 1080, key is row0008/info:/1730983866587/Put/seqid=0 2024-11-07T12:51:19,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741840_1016 (size=12509) 2024-11-07T12:51:19,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741840_1016 (size=12509) 2024-11-07T12:51:19,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/f3e4bb766dc04a46989fcc333012c32f 2024-11-07T12:51:19,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/f3e4bb766dc04a46989fcc333012c32f as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/f3e4bb766dc04a46989fcc333012c32f 2024-11-07T12:51:19,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/f3e4bb766dc04a46989fcc333012c32f, entries=7, sequenceid=21, filesize=12.2 K 2024-11-07T12:51:20,062 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:20,062 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f360db58326b841fa64f6e8c6cd8d990 in 
437ms, sequenceid=21, compaction requested=false 2024-11-07T12:51:20,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f360db58326b841fa64f6e8c6cd8d990: 2024-11-07T12:51:20,062 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-07T12:51:20,062 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:51:20,063 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cd82bf13d0fb458bba76afec204f9770 because midkey is the same as first or last row 2024-11-07T12:51:21,828 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:22,251 INFO [master/db9ad1cb6cf9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-07T12:51:22,251 INFO [master/db9ad1cb6cf9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-07T12:51:24,032 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:24,034 WARN [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:24,035 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C43943%2C1730983840278:(num 1730983872597) roll requested 2024-11-07T12:51:24,036 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.1730983884035 2024-11-07T12:51:24,244 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:24,244 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:24,244 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:24,244 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:24,244 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:24,244 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
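The WARN at 12:51:24 is the count-based roll trigger this test exercises: eight syncs have exceeded the slow-sync cost since the last roll, above the threshold of five, so a WAL roll is requested; later entries (time=5005 ms, threshold=5000 ms) show the companion time-based trigger. The sketch below models that bookkeeping in isolation — a per-sync time limit plus a slow-sync counter that resets on roll. The class name, fields, and defaults are illustrative, not HBase's FSHLog internals or configuration keys.

// Illustrative sketch of the two roll triggers visible in the log:
//  - a single sync slower than slowSyncRollMs requests a roll immediately
//  - more than slowSyncRollCount "slow" syncs since the last roll also requests one
// Names and defaults are invented for the example.
public final class SlowSyncRollSketch {
    private final long slowSyncMs;        // a sync above this counts as "slow"
    private final long slowSyncRollMs;    // a single sync above this forces a roll
    private final int slowSyncRollCount;  // this many slow syncs since the last roll forces a roll
    private int slowSyncsSinceRoll;

    SlowSyncRollSketch(long slowSyncMs, long slowSyncRollMs, int slowSyncRollCount) {
        this.slowSyncMs = slowSyncMs;
        this.slowSyncRollMs = slowSyncRollMs;
        this.slowSyncRollCount = slowSyncRollCount;
    }

    /** Returns true if a WAL roll should be requested after a sync that took costMs. */
    boolean onSyncCompleted(long costMs) {
        if (costMs >= slowSyncRollMs) {
            return true;                              // time-based trigger, e.g. 5005 ms >= 5000 ms
        }
        if (costMs >= slowSyncMs && ++slowSyncsSinceRoll > slowSyncRollCount) {
            return true;                              // count-based trigger, e.g. count=8 > threshold=5
        }
        return false;
    }

    void onRolled() {
        slowSyncsSinceRoll = 0;                       // counter resets once the WAL has rolled
    }

    public static void main(String[] args) {
        SlowSyncRollSketch wal = new SlowSyncRollSketch(100, 5000, 5);
        for (int i = 0; i < 8; i++) {
            System.out.println("sync " + i + " (201 ms) -> roll? " + wal.onSyncCompleted(201));
        }
    }
}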
2024-11-07T12:51:24,245 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983872597 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983884035 2024-11-07T12:51:24,246 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:46833:46833)] 2024-11-07T12:51:24,246 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983872597 is not closed yet, will try archiving it next time 2024-11-07T12:51:24,246 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983852525 to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs/db9ad1cb6cf9%2C43943%2C1730983840278.1730983852525 2024-11-07T12:51:24,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741839_1015 (size=7739) 2024-11-07T12:51:24,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741839_1015 (size=7739) 2024-11-07T12:51:26,235 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:28,210 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f360db58326b841fa64f6e8c6cd8d990, had cached 0 bytes from a total of 25018 2024-11-07T12:51:28,440 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:30,644 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:32,848 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], 
DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:34,850 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T12:51:34,850 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.1730983894850 2024-11-07T12:51:38,684 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:51:39,858 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:39,860 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:39,860 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C43943%2C1730983840278:(num 1730983894850) roll requested 2024-11-07T12:51:39,861 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:39,861 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:39,861 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:39,861 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:39,861 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:39,861 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983884035 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983894850 2024-11-07T12:51:39,863 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46833:46833),(127.0.0.1/127.0.0.1:40741:40741)] 2024-11-07T12:51:39,863 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983884035 is not closed yet, will try archiving it next time 2024-11-07T12:51:39,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741841_1017 (size=4753) 2024-11-07T12:51:39,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741841_1017 (size=4753) 2024-11-07T12:51:39,865 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.1730983899865 2024-11-07T12:51:44,867 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK], DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK]] 2024-11-07T12:51:44,867 WARN [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK], DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK]] 2024-11-07T12:51:44,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43943 {}] regionserver.HRegion(8855): Flush requested on f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:51:44,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f360db58326b841fa64f6e8c6cd8d990 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:51:44,875 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK], DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK]] 2024-11-07T12:51:44,875 WARN [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK], DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK]] 2024-11-07T12:51:46,868 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T12:51:49,870 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK], DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK]] 2024-11-07T12:51:49,870 WARN [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK], DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK]] 2024-11-07T12:51:49,870 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:49,870 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:49,870 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:49,870 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:49,871 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:49,871 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983894850 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983899865 2024-11-07T12:51:49,872 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:46833:46833)] 2024-11-07T12:51:49,872 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983894850 is not closed yet, will try archiving it next time 2024-11-07T12:51:49,872 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C43943%2C1730983840278:(num 1730983899865) roll requested 2024-11-07T12:51:49,873 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.1730983909872 2024-11-07T12:51:49,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741842_1018 (size=1569) 2024-11-07T12:51:49,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741842_1018 (size=1569) 2024-11-07T12:51:49,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/4d46221f0c8243b099e8be4b625b231b is 1080, key is row0015/info:/1730983881626/Put/seqid=0 2024-11-07T12:51:49,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741844_1020 (size=12509) 2024-11-07T12:51:49,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741844_1020 (size=12509) 2024-11-07T12:51:49,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/4d46221f0c8243b099e8be4b625b231b 2024-11-07T12:51:49,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/4d46221f0c8243b099e8be4b625b231b as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/4d46221f0c8243b099e8be4b625b231b 2024-11-07T12:51:49,902 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/4d46221f0c8243b099e8be4b625b231b, entries=7, sequenceid=31, filesize=12.2 K 2024-11-07T12:51:54,881 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:54,881 WARN [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:54,903 INFO [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:54,904 WARN [FSHLog-0-hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979-prefix:db9ad1cb6cf9,43943,1730983840278 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38133,DS-5250b43d-a6e2-4e1f-a284-f227144507ec,DISK], DatanodeInfoWithStorage[127.0.0.1:45909,DS-fb28e249-71c7-444a-af9a-6f340750efc5,DISK]] 2024-11-07T12:51:54,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f360db58326b841fa64f6e8c6cd8d990 in 10036ms, sequenceid=31, compaction requested=true 2024-11-07T12:51:54,904 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f360db58326b841fa64f6e8c6cd8d990: 2024-11-07T12:51:54,904 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,904 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,904 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-07T12:51:54,904 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:51:54,904 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,904 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cd82bf13d0fb458bba76afec204f9770 because midkey is the same as first or last row 2024-11-07T12:51:54,904 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,904 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983899865 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983909872 2024-11-07T12:51:54,905 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:40741:40741),(127.0.0.1/127.0.0.1:46833:46833)] 2024-11-07T12:51:54,905 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983899865 is not closed yet, will try archiving it next time 2024-11-07T12:51:54,906 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983872597 to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs/db9ad1cb6cf9%2C43943%2C1730983840278.1730983872597 2024-11-07T12:51:54,906 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C43943%2C1730983840278:(num 1730983914906) roll requested 2024-11-07T12:51:54,906 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.1730983914906 2024-11-07T12:51:54,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f360db58326b841fa64f6e8c6cd8d990:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:51:54,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741843_1019 (size=438) 2024-11-07T12:51:54,908 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983884035 to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs/db9ad1cb6cf9%2C43943%2C1730983840278.1730983884035 2024-11-07T12:51:54,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741843_1019 (size=438) 2024-11-07T12:51:54,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:51:54,909 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:51:54,910 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983894850 to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs/db9ad1cb6cf9%2C43943%2C1730983840278.1730983894850 2024-11-07T12:51:54,911 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983899865 to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs/db9ad1cb6cf9%2C43943%2C1730983840278.1730983899865 2024-11-07T12:51:54,913 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-07T12:51:54,914 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.HStore(1541): f360db58326b841fa64f6e8c6cd8d990/info is initiating minor compaction (all files) 2024-11-07T12:51:54,915 INFO [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f360db58326b841fa64f6e8c6cd8d990/info in TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:51:54,915 INFO [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cd82bf13d0fb458bba76afec204f9770, hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/f3e4bb766dc04a46989fcc333012c32f, hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/4d46221f0c8243b099e8be4b625b231b] into tmpdir=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp, totalSize=36.6 K 2024-11-07T12:51:54,916 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd82bf13d0fb458bba76afec204f9770, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1730983852550 2024-11-07T12:51:54,917 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] compactions.Compactor(225): Compacting f3e4bb766dc04a46989fcc333012c32f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1730983866587 2024-11-07T12:51:54,917 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4d46221f0c8243b099e8be4b625b231b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1730983881626 2024-11-07T12:51:54,921 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,921 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,921 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,922 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,922 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,922 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983909872 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983914906 2024-11-07T12:51:54,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741845_1021 (size=93) 2024-11-07T12:51:54,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741845_1021 (size=93) 2024-11-07T12:51:54,925 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983909872 to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs/db9ad1cb6cf9%2C43943%2C1730983840278.1730983909872 2024-11-07T12:51:54,933 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46833:46833),(127.0.0.1/127.0.0.1:40741:40741)] 2024-11-07T12:51:54,933 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43943%2C1730983840278.1730983914933 2024-11-07T12:51:54,942 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,942 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,942 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,942 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,946 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:51:54,947 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983914906 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983914933 2024-11-07T12:51:54,950 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46833:46833),(127.0.0.1/127.0.0.1:40741:40741)] 2024-11-07T12:51:54,950 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/WALs/db9ad1cb6cf9,43943,1730983840278/db9ad1cb6cf9%2C43943%2C1730983840278.1730983914906 is not closed yet, will try archiving it next time 2024-11-07T12:51:54,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741846_1022 (size=1258) 2024-11-07T12:51:54,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741846_1022 (size=1258) 2024-11-07T12:51:54,952 INFO [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f360db58326b841fa64f6e8c6cd8d990#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:51:54,954 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/cab4e686671c4147b72eda8ca7b55aa8 is 1080, key is row0001/info:/1730983852550/Put/seqid=0 2024-11-07T12:51:54,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741848_1024 (size=27710) 2024-11-07T12:51:54,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741848_1024 (size=27710) 2024-11-07T12:51:54,974 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/cab4e686671c4147b72eda8ca7b55aa8 as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cab4e686671c4147b72eda8ca7b55aa8 2024-11-07T12:51:54,991 INFO [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f360db58326b841fa64f6e8c6cd8d990/info of f360db58326b841fa64f6e8c6cd8d990 into cab4e686671c4147b72eda8ca7b55aa8(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:51:54,991 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f360db58326b841fa64f6e8c6cd8d990: 2024-11-07T12:51:54,993 INFO [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990., storeName=f360db58326b841fa64f6e8c6cd8d990/info, priority=13, startTime=1730983914906; duration=0sec 2024-11-07T12:51:54,993 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-07T12:51:54,993 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:51:54,993 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cab4e686671c4147b72eda8ca7b55aa8 because midkey is the same as first or last row 2024-11-07T12:51:54,994 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-07T12:51:54,994 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:51:54,994 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cab4e686671c4147b72eda8ca7b55aa8 because midkey is the same as first or last row 2024-11-07T12:51:54,994 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-07T12:51:54,994 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:51:54,994 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cab4e686671c4147b72eda8ca7b55aa8 because midkey is the same as first or last row 2024-11-07T12:51:54,994 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:51:54,994 DEBUG [RS:0;db9ad1cb6cf9:43943-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f360db58326b841fa64f6e8c6cd8d990:info 2024-11-07T12:52:06,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43943 {}] regionserver.HRegion(8855): Flush requested on f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:52:06,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f360db58326b841fa64f6e8c6cd8d990 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:52:06,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/2692405ea9df4f8e9cf99c9eb6579cb7 is 1080, key is row0022/info:/1730983914935/Put/seqid=0 2024-11-07T12:52:06,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741849_1025 (size=12509) 2024-11-07T12:52:06,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741849_1025 (size=12509) 2024-11-07T12:52:06,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/2692405ea9df4f8e9cf99c9eb6579cb7 2024-11-07T12:52:06,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/2692405ea9df4f8e9cf99c9eb6579cb7 as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/2692405ea9df4f8e9cf99c9eb6579cb7 2024-11-07T12:52:06,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/2692405ea9df4f8e9cf99c9eb6579cb7, entries=7, sequenceid=42, filesize=12.2 K 2024-11-07T12:52:06,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f360db58326b841fa64f6e8c6cd8d990 in 34ms, sequenceid=42, compaction requested=false 2024-11-07T12:52:06,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f360db58326b841fa64f6e8c6cd8d990: 2024-11-07T12:52:06,992 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-07T12:52:06,992 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:06,992 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cab4e686671c4147b72eda8ca7b55aa8 because midkey is the same as first or last row 2024-11-07T12:52:08,685 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:52:13,211 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f360db58326b841fa64f6e8c6cd8d990, had cached 0 bytes from a total of 40219 2024-11-07T12:52:14,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:52:14,970 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
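Every flush in this run ends with the same split-policy check seen just above: the store's summed size (39.3 K here, 40,219 bytes per the metrics line) exceeds the 16.0 K size-to-check, but the split is still refused because the chosen split point, the largest file's midkey, equals its first or last row, so one daughter region would be empty. The sketch below captures just that decision under those simplified inputs; it is not IncreasingToUpperBoundRegionSplitPolicy or StoreUtils.

import java.util.Arrays;

// Illustrative split-decision sketch: size says "split", but the split point is
// rejected when the midkey equals the first or last key of the store file,
// mirroring the "cannot split ... because midkey is the same as first or last row"
// lines above. Inputs are simplified; this is not HBase's split policy code.
public final class SplitCheckSketch {
    static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes,
                               byte[] firstKey, byte[] midKey, byte[] lastKey) {
        if (sumStoreSizeBytes <= sizeToCheckBytes) {
            return false;                              // not big enough yet
        }
        if (midKey == null
                || Arrays.equals(midKey, firstKey)
                || Arrays.equals(midKey, lastKey)) {
            return false;                              // no usable split point: one daughter would be empty
        }
        return true;
    }

    public static void main(String[] args) {
        byte[] first = "row0001".getBytes();
        byte[] last = "row0029".getBytes();
        // ~39.3 K store vs a 16.0 K check size, but midkey == first row -> no split,
        // matching the behaviour logged for f360db58326b841fa64f6e8c6cd8d990.
        System.out.println(shouldSplit(40_219, 16_384, first, first, last));                 // false
        System.out.println(shouldSplit(40_219, 16_384, first, "row0015".getBytes(), last));  // true
    }
}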
2024-11-07T12:52:14,970 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:14,975 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:14,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:14,976 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
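The call stack above is the ordinary JUnit 4 tear-down path: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection (the "Connection has been closed" line) and stops the RPC client before the cluster itself is brought down. A hypothetical skeleton of that shape is sketched below; shutdownMiniCluster is the call visible in the trace, while the class name, field name, and setUp body are assumptions made for the example.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Hypothetical skeleton of the tear-down path in the stack trace: a JUnit 4 @After
// hook that shuts the mini-cluster down, which closes the shared async connection
// and then stops the master and region server started for the test.
public class LogRollingTearDownSketch {
    private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        testUtil.startMiniCluster();    // assumption: one master + one region server, as in this run
    }

    @After
    public void tearDown() throws Exception {
        testUtil.shutdownMiniCluster(); // the call visible in the trace; closes connections, then the cluster
    }
}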
2024-11-07T12:52:14,976 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:52:14,976 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1904156029, stopped=false 2024-11-07T12:52:14,976 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db9ad1cb6cf9,33149,1730983839599 2024-11-07T12:52:14,978 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:14,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:14,978 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:14,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:14,979 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:52:14,979 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:52:14,979 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:14,979 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:14,979 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:14,979 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db9ad1cb6cf9,43943,1730983840278' ***** 2024-11-07T12:52:14,980 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:52:14,980 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:14,980 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:52:14,980 INFO [RS:0;db9ad1cb6cf9:43943 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:52:14,980 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:52:14,980 INFO [RS:0;db9ad1cb6cf9:43943 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:52:14,981 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(3091): Received CLOSE for f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:52:14,981 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(959): stopping server db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:52:14,981 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:52:14,982 INFO [RS:0;db9ad1cb6cf9:43943 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db9ad1cb6cf9:43943. 
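Shutdown is signalled through ZooKeeper: the master deletes /hbase/running, every watcher receives the NodeDeleted event shown above, and each then re-arms its watch on the now-missing znode ("Set watcher on znode that does not yet exist"). The stand-alone sketch below shows that signal pattern with the plain ZooKeeper client API; the quorum address is the one from the log, the session timeout is a placeholder, and this is not HBase's ZKWatcher.

import java.io.IOException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative sketch of the "cluster up" znode pattern seen in the log: members
// watch /hbase/running; when the master deletes it, the NodeDeleted event tells
// them to begin shutting down, and the watch is re-set afterwards.
public final class RunningZnodeWatchSketch implements Watcher {
    private static final String RUNNING_ZNODE = "/hbase/running";
    private final ZooKeeper zk;

    RunningZnodeWatchSketch(String connectString) throws IOException {
        this.zk = new ZooKeeper(connectString, 30_000, this);
    }

    void watchRunningNode() throws KeeperException, InterruptedException {
        // exists() works whether or not the znode is there and leaves a watch behind,
        // matching the "Set watcher on znode that does not yet exist" debug line.
        zk.exists(RUNNING_ZNODE, true);
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted && RUNNING_ZNODE.equals(event.getPath())) {
            System.out.println("cluster shutdown requested, stopping this server");
        }
        try {
            watchRunningNode(); // re-arm the one-shot watch
        } catch (KeeperException | InterruptedException e) {
            System.err.println("could not re-set watch on " + RUNNING_ZNODE + ": " + e);
        }
    }

    public static void main(String[] args) throws Exception {
        new RunningZnodeWatchSketch("127.0.0.1:56842").watchRunningNode();
        Thread.sleep(60_000); // keep the process alive long enough to receive events
    }
}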
2024-11-07T12:52:14,982 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:14,982 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:14,982 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f360db58326b841fa64f6e8c6cd8d990, disabling compactions & flushes 2024-11-07T12:52:14,982 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:52:14,982 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:52:14,982 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:52:14,982 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. after waiting 0 ms 2024-11-07T12:52:14,982 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:52:14,982 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:52:14,982 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-07T12:52:14,982 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:52:14,982 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing f360db58326b841fa64f6e8c6cd8d990 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-07T12:52:14,982 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-07T12:52:14,983 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, f360db58326b841fa64f6e8c6cd8d990=TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.} 2024-11-07T12:52:14,983 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:52:14,983 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:52:14,983 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:52:14,983 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f360db58326b841fa64f6e8c6cd8d990 2024-11-07T12:52:14,983 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:52:14,983 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:52:14,983 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-07T12:52:14,988 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/e5ca5df1f4394d2486c4273a9ceb0856 is 1080, key is row0029/info:/1730983928960/Put/seqid=0 2024-11-07T12:52:14,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741850_1026 (size=8193) 2024-11-07T12:52:14,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741850_1026 (size=8193) 2024-11-07T12:52:14,995 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/e5ca5df1f4394d2486c4273a9ceb0856 2024-11-07T12:52:15,004 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/.tmp/info/0af5d781fe6b45f980cd7023fcaed20b is 195, key is TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990./info:regioninfo/1730983843238/Put/seqid=0 2024-11-07T12:52:15,005 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/.tmp/info/e5ca5df1f4394d2486c4273a9ceb0856 as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/e5ca5df1f4394d2486c4273a9ceb0856 2024-11-07T12:52:15,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741851_1027 (size=7016) 2024-11-07T12:52:15,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741851_1027 (size=7016) 2024-11-07T12:52:15,011 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/.tmp/info/0af5d781fe6b45f980cd7023fcaed20b 2024-11-07T12:52:15,013 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/e5ca5df1f4394d2486c4273a9ceb0856, entries=3, sequenceid=48, filesize=8.0 K 2024-11-07T12:52:15,015 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for f360db58326b841fa64f6e8c6cd8d990 in 33ms, sequenceid=48, compaction requested=true 2024-11-07T12:52:15,015 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cd82bf13d0fb458bba76afec204f9770, hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/f3e4bb766dc04a46989fcc333012c32f, hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/4d46221f0c8243b099e8be4b625b231b] to archive 2024-11-07T12:52:15,019 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T12:52:15,022 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cd82bf13d0fb458bba76afec204f9770 to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/archive/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/cd82bf13d0fb458bba76afec204f9770 2024-11-07T12:52:15,024 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/f3e4bb766dc04a46989fcc333012c32f to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/archive/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/f3e4bb766dc04a46989fcc333012c32f 2024-11-07T12:52:15,026 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/4d46221f0c8243b099e8be4b625b231b to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/archive/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/info/4d46221f0c8243b099e8be4b625b231b 2024-11-07T12:52:15,040 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/.tmp/ns/63d417b129d84e959d83be4503f6cf5d is 43, key is default/ns:d/1730983842132/Put/seqid=0 2024-11-07T12:52:15,042 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=db9ad1cb6cf9:33149 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-07T12:52:15,046 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cd82bf13d0fb458bba76afec204f9770=12509, f3e4bb766dc04a46989fcc333012c32f=12509, 4d46221f0c8243b099e8be4b625b231b=12509] 2024-11-07T12:52:15,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741852_1028 (size=5153) 2024-11-07T12:52:15,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741852_1028 (size=5153) 2024-11-07T12:52:15,049 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/.tmp/ns/63d417b129d84e959d83be4503f6cf5d 2024-11-07T12:52:15,052 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/default/TestLogRolling-testSlowSyncLogRolling/f360db58326b841fa64f6e8c6cd8d990/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-07T12:52:15,054 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 2024-11-07T12:52:15,055 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f360db58326b841fa64f6e8c6cd8d990: Waiting for close lock at 1730983934981Running coprocessor pre-close hooks at 1730983934982 (+1 ms)Disabling compacts and flushes for region at 1730983934982Disabling writes for close at 1730983934982Obtaining lock to block concurrent updates at 1730983934982Preparing flush snapshotting stores in f360db58326b841fa64f6e8c6cd8d990 at 1730983934982Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1730983934983 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. at 1730983934984 (+1 ms)Flushing f360db58326b841fa64f6e8c6cd8d990/info: creating writer at 1730983934984Flushing f360db58326b841fa64f6e8c6cd8d990/info: appending metadata at 1730983934987 (+3 ms)Flushing f360db58326b841fa64f6e8c6cd8d990/info: closing flushed file at 1730983934988 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f870ca4: reopening flushed file at 1730983935004 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for f360db58326b841fa64f6e8c6cd8d990 in 33ms, sequenceid=48, compaction requested=true at 1730983935015 (+11 ms)Writing region close event to WAL at 1730983935047 (+32 ms)Running coprocessor post-close hooks at 1730983935053 (+6 ms)Closed at 1730983935054 (+1 ms) 2024-11-07T12:52:15,055 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1730983842387.f360db58326b841fa64f6e8c6cd8d990. 
2024-11-07T12:52:15,073 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/.tmp/table/c59eba4a63df489aba4863c89a91ff9f is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1730983843254/Put/seqid=0 2024-11-07T12:52:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741853_1029 (size=5396) 2024-11-07T12:52:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741853_1029 (size=5396) 2024-11-07T12:52:15,079 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/.tmp/table/c59eba4a63df489aba4863c89a91ff9f 2024-11-07T12:52:15,087 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/.tmp/info/0af5d781fe6b45f980cd7023fcaed20b as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/info/0af5d781fe6b45f980cd7023fcaed20b 2024-11-07T12:52:15,095 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/info/0af5d781fe6b45f980cd7023fcaed20b, entries=10, sequenceid=11, filesize=6.9 K 2024-11-07T12:52:15,097 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/.tmp/ns/63d417b129d84e959d83be4503f6cf5d as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/ns/63d417b129d84e959d83be4503f6cf5d 2024-11-07T12:52:15,104 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/ns/63d417b129d84e959d83be4503f6cf5d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-07T12:52:15,105 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/.tmp/table/c59eba4a63df489aba4863c89a91ff9f as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/table/c59eba4a63df489aba4863c89a91ff9f 2024-11-07T12:52:15,113 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/table/c59eba4a63df489aba4863c89a91ff9f, entries=2, sequenceid=11, filesize=5.3 K 2024-11-07T12:52:15,115 INFO 
[RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false 2024-11-07T12:52:15,120 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-07T12:52:15,121 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:52:15,121 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:52:15,122 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730983934983Running coprocessor pre-close hooks at 1730983934983Disabling compacts and flushes for region at 1730983934983Disabling writes for close at 1730983934983Obtaining lock to block concurrent updates at 1730983934983Preparing flush snapshotting stores in 1588230740 at 1730983934983Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1730983934983Flushing stores of hbase:meta,,1.1588230740 at 1730983934984 (+1 ms)Flushing 1588230740/info: creating writer at 1730983934984Flushing 1588230740/info: appending metadata at 1730983935004 (+20 ms)Flushing 1588230740/info: closing flushed file at 1730983935004Flushing 1588230740/ns: creating writer at 1730983935018 (+14 ms)Flushing 1588230740/ns: appending metadata at 1730983935039 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1730983935039Flushing 1588230740/table: creating writer at 1730983935056 (+17 ms)Flushing 1588230740/table: appending metadata at 1730983935072 (+16 ms)Flushing 1588230740/table: closing flushed file at 1730983935072Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6210853a: reopening flushed file at 1730983935086 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8c51bae: reopening flushed file at 1730983935096 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a9199d7: reopening flushed file at 1730983935104 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false at 1730983935115 (+11 ms)Writing region close event to WAL at 1730983935116 (+1 ms)Running coprocessor post-close hooks at 1730983935121 (+5 ms)Closed at 1730983935121 2024-11-07T12:52:15,122 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:52:15,183 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(976): stopping server db9ad1cb6cf9,43943,1730983840278; all regions closed. 
2024-11-07T12:52:15,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,185 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,185 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,185 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,185 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741834_1010 (size=3066) 2024-11-07T12:52:15,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741834_1010 (size=3066) 2024-11-07T12:52:15,192 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs 2024-11-07T12:52:15,192 INFO [RS:0;db9ad1cb6cf9:43943 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C43943%2C1730983840278.meta:.meta(num 1730983841983) 2024-11-07T12:52:15,192 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,192 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,193 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,193 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,193 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741847_1023 (size=12695) 2024-11-07T12:52:15,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741847_1023 (size=12695) 2024-11-07T12:52:15,199 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/oldWALs 2024-11-07T12:52:15,199 INFO [RS:0;db9ad1cb6cf9:43943 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C43943%2C1730983840278:(num 1730983914933) 2024-11-07T12:52:15,199 DEBUG [RS:0;db9ad1cb6cf9:43943 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:15,199 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:52:15,199 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:52:15,199 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.ChoreService(370): Chore service for: regionserver/db9ad1cb6cf9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-07T12:52:15,199 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:52:15,200 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:52:15,200 INFO [RS:0;db9ad1cb6cf9:43943 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43943 2024-11-07T12:52:15,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:52:15,204 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9ad1cb6cf9,43943,1730983840278 2024-11-07T12:52:15,204 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:52:15,206 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9ad1cb6cf9,43943,1730983840278] 2024-11-07T12:52:15,207 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db9ad1cb6cf9,43943,1730983840278 already deleted, retry=false 2024-11-07T12:52:15,208 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db9ad1cb6cf9,43943,1730983840278 expired; onlineServers=0 2024-11-07T12:52:15,208 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db9ad1cb6cf9,33149,1730983839599' ***** 2024-11-07T12:52:15,208 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:52:15,208 INFO [M:0;db9ad1cb6cf9:33149 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:52:15,208 INFO [M:0;db9ad1cb6cf9:33149 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:52:15,208 DEBUG [M:0;db9ad1cb6cf9:33149 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:52:15,208 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:52:15,208 DEBUG [M:0;db9ad1cb6cf9:33149 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:52:15,208 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983841233 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983841233,5,FailOnTimeoutGroup] 2024-11-07T12:52:15,208 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983841237 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983841237,5,FailOnTimeoutGroup] 2024-11-07T12:52:15,208 INFO [M:0;db9ad1cb6cf9:33149 {}] hbase.ChoreService(370): Chore service for: master/db9ad1cb6cf9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:52:15,208 INFO [M:0;db9ad1cb6cf9:33149 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:52:15,209 DEBUG [M:0;db9ad1cb6cf9:33149 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:52:15,209 INFO [M:0;db9ad1cb6cf9:33149 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:52:15,209 INFO [M:0;db9ad1cb6cf9:33149 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:52:15,209 INFO [M:0;db9ad1cb6cf9:33149 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:52:15,209 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:52:15,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:52:15,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:15,210 DEBUG [M:0;db9ad1cb6cf9:33149 {}] zookeeper.ZKUtil(347): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:52:15,210 WARN [M:0;db9ad1cb6cf9:33149 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:52:15,211 INFO [M:0;db9ad1cb6cf9:33149 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/.lastflushedseqids 2024-11-07T12:52:15,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741854_1030 (size=130) 2024-11-07T12:52:15,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741854_1030 (size=130) 2024-11-07T12:52:15,223 INFO [M:0;db9ad1cb6cf9:33149 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:52:15,223 INFO [M:0;db9ad1cb6cf9:33149 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:52:15,223 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:52:15,223 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:15,223 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:15,223 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:52:15,223 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:15,224 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-07T12:52:15,241 DEBUG [M:0;db9ad1cb6cf9:33149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/68853f8699214453a4cb814b136026ac is 82, key is hbase:meta,,1/info:regioninfo/1730983842056/Put/seqid=0 2024-11-07T12:52:15,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741855_1031 (size=5672) 2024-11-07T12:52:15,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741855_1031 (size=5672) 2024-11-07T12:52:15,249 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/68853f8699214453a4cb814b136026ac 2024-11-07T12:52:15,271 DEBUG [M:0;db9ad1cb6cf9:33149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/27585074dc7044d28a4ff227f7e01626 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730983843262/Put/seqid=0 2024-11-07T12:52:15,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741856_1032 (size=6248) 2024-11-07T12:52:15,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741856_1032 (size=6248) 2024-11-07T12:52:15,277 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/27585074dc7044d28a4ff227f7e01626 2024-11-07T12:52:15,283 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 27585074dc7044d28a4ff227f7e01626 2024-11-07T12:52:15,299 DEBUG [M:0;db9ad1cb6cf9:33149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/51fc3cc6294b4406ba45380771e00e3c is 69, key is db9ad1cb6cf9,43943,1730983840278/rs:state/1730983841342/Put/seqid=0 2024-11-07T12:52:15,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741857_1033 (size=5156) 2024-11-07T12:52:15,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741857_1033 (size=5156) 2024-11-07T12:52:15,305 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/51fc3cc6294b4406ba45380771e00e3c 2024-11-07T12:52:15,306 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:52:15,306 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43943-0x1001a4b4eb80001, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:52:15,307 INFO [RS:0;db9ad1cb6cf9:43943 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:52:15,307 INFO [RS:0;db9ad1cb6cf9:43943 {}] regionserver.HRegionServer(1031): Exiting; stopping=db9ad1cb6cf9,43943,1730983840278; zookeeper connection closed. 2024-11-07T12:52:15,307 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@993a4fe {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@993a4fe 2024-11-07T12:52:15,308 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:52:15,327 DEBUG [M:0;db9ad1cb6cf9:33149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b26a10ab5584b73a6f122771311fe55 is 52, key is load_balancer_on/state:d/1730983842365/Put/seqid=0 2024-11-07T12:52:15,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741858_1034 (size=5056) 2024-11-07T12:52:15,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741858_1034 (size=5056) 2024-11-07T12:52:15,334 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b26a10ab5584b73a6f122771311fe55 2024-11-07T12:52:15,340 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/68853f8699214453a4cb814b136026ac as 
hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/68853f8699214453a4cb814b136026ac 2024-11-07T12:52:15,346 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/68853f8699214453a4cb814b136026ac, entries=8, sequenceid=59, filesize=5.5 K 2024-11-07T12:52:15,347 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/27585074dc7044d28a4ff227f7e01626 as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/27585074dc7044d28a4ff227f7e01626 2024-11-07T12:52:15,353 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 27585074dc7044d28a4ff227f7e01626 2024-11-07T12:52:15,353 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/27585074dc7044d28a4ff227f7e01626, entries=6, sequenceid=59, filesize=6.1 K 2024-11-07T12:52:15,354 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/51fc3cc6294b4406ba45380771e00e3c as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/51fc3cc6294b4406ba45380771e00e3c 2024-11-07T12:52:15,360 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/51fc3cc6294b4406ba45380771e00e3c, entries=1, sequenceid=59, filesize=5.0 K 2024-11-07T12:52:15,361 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b26a10ab5584b73a6f122771311fe55 as hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9b26a10ab5584b73a6f122771311fe55 2024-11-07T12:52:15,367 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9b26a10ab5584b73a6f122771311fe55, entries=1, sequenceid=59, filesize=4.9 K 2024-11-07T12:52:15,369 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=59, compaction requested=false 2024-11-07T12:52:15,370 INFO [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:52:15,371 DEBUG [M:0;db9ad1cb6cf9:33149 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730983935223Disabling compacts and flushes for region at 1730983935223Disabling writes for close at 1730983935223Obtaining lock to block concurrent updates at 1730983935224 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730983935224Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1730983935224Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730983935225 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730983935225Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730983935241 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730983935241Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730983935255 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730983935270 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730983935270Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730983935283 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730983935298 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730983935298Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730983935312 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730983935327 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730983935327Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23e964c3: reopening flushed file at 1730983935339 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ad58ec7: reopening flushed file at 1730983935346 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4810acd3: reopening flushed file at 1730983935353 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70babdb2: reopening flushed file at 1730983935360 (+7 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=59, compaction requested=false at 1730983935369 (+9 ms)Writing region close event to WAL at 1730983935370 (+1 ms)Closed at 1730983935370 2024-11-07T12:52:15,371 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,371 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,372 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,372 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,372 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:15,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741830_1006 (size=27985) 2024-11-07T12:52:15,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38133 is added to blk_1073741830_1006 (size=27985) 2024-11-07T12:52:15,375 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:52:15,375 INFO [M:0;db9ad1cb6cf9:33149 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:52:15,375 INFO [M:0;db9ad1cb6cf9:33149 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33149 2024-11-07T12:52:15,375 INFO [M:0;db9ad1cb6cf9:33149 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:52:15,424 INFO [regionserver/db9ad1cb6cf9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:52:15,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:52:15,478 INFO [M:0;db9ad1cb6cf9:33149 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:52:15,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1001a4b4eb80000, quorum=127.0.0.1:56842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:52:15,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:15,485 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:52:15,485 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:52:15,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:52:15,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.log.dir/,STOPPED} 2024-11-07T12:52:15,488 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:52:15,488 WARN [BP-929681960-172.17.0.2-1730983836734 heartbeating to localhost/127.0.0.1:34313 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:52:15,488 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:52:15,488 WARN [BP-929681960-172.17.0.2-1730983836734 heartbeating to localhost/127.0.0.1:34313 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-929681960-172.17.0.2-1730983836734 (Datanode Uuid 37a16055-f5d5-4efc-a01e-5deeb55ed03a) service to localhost/127.0.0.1:34313 2024-11-07T12:52:15,489 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/data/data3/current/BP-929681960-172.17.0.2-1730983836734 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:15,490 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/data/data4/current/BP-929681960-172.17.0.2-1730983836734 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:15,490 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:52:15,492 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:15,492 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:52:15,492 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:52:15,492 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:52:15,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.log.dir/,STOPPED} 2024-11-07T12:52:15,494 WARN [BP-929681960-172.17.0.2-1730983836734 heartbeating to localhost/127.0.0.1:34313 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:52:15,494 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:52:15,494 WARN [BP-929681960-172.17.0.2-1730983836734 heartbeating to localhost/127.0.0.1:34313 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-929681960-172.17.0.2-1730983836734 (Datanode Uuid 18aca364-8f18-4fb2-a6df-b6b7da64cc15) service to localhost/127.0.0.1:34313 2024-11-07T12:52:15,494 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:52:15,495 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/data/data1/current/BP-929681960-172.17.0.2-1730983836734 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:15,495 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/cluster_302973cd-33a2-b9c9-1f63-db82430c09fc/data/data2/current/BP-929681960-172.17.0.2-1730983836734 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:15,495 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:52:15,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:52:15,505 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:52:15,505 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:52:15,505 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:52:15,505 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.log.dir/,STOPPED} 2024-11-07T12:52:15,513 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:52:15,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:52:15,551 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/db9ad1cb6cf9:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/db9ad1cb6cf9:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@18e1792d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34313 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34313 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34313 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:34313 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34313 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/db9ad1cb6cf9:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=80 (was 263), ProcessCount=11 (was 11), AvailableMemoryMB=8574 (was 9476) 2024-11-07T12:52:15,558 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=80, ProcessCount=11, AvailableMemoryMB=8574 2024-11-07T12:52:15,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:52:15,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.log.dir so I do NOT create it in target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3 2024-11-07T12:52:15,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4c18b10-cdb4-9037-27f7-18e81195fb48/hadoop.tmp.dir so I do NOT create it in target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3 2024-11-07T12:52:15,558 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4, deleteOnExit=true 2024-11-07T12:52:15,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:52:15,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/test.cache.data in system properties and HBase conf 2024-11-07T12:52:15,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:52:15,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:52:15,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:52:15,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:52:15,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:52:15,559 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-07T12:52:15,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:52:15,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:52:15,574 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:52:15,646 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:15,652 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:15,653 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:15,653 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:15,653 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:52:15,657 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:15,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59505eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:15,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f681677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:15,775 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b8ef2ff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/java.io.tmpdir/jetty-localhost-35009-hadoop-hdfs-3_4_1-tests_jar-_-any-16039000690102495114/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:52:15,775 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10885b70{HTTP/1.1, (http/1.1)}{localhost:35009} 2024-11-07T12:52:15,776 INFO [Time-limited test {}] server.Server(415): Started @101045ms 2024-11-07T12:52:15,789 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:52:15,859 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:15,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:15,865 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:15,865 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:15,865 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:52:15,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a0f3c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:15,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@469dec96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:15,981 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a15ed6a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/java.io.tmpdir/jetty-localhost-40489-hadoop-hdfs-3_4_1-tests_jar-_-any-15300755599016900881/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:15,982 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@b5aedfa{HTTP/1.1, (http/1.1)}{localhost:40489} 2024-11-07T12:52:15,982 INFO [Time-limited test {}] server.Server(415): Started @101251ms 2024-11-07T12:52:15,984 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:52:16,022 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:16,027 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:16,028 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:16,028 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:16,028 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:52:16,029 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@194f043a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:16,029 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@274298f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:16,076 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/data/data2/current/BP-973058567-172.17.0.2-1730983935592/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:16,076 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/data/data1/current/BP-973058567-172.17.0.2-1730983935592/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:16,097 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:52:16,099 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6fe1ba734ee7ea4e with lease ID 0xbc4bfe0eb4309be5: Processing first storage report for DS-41173e0c-72c7-4aef-9c9a-68ca170bc93c from datanode DatanodeRegistration(127.0.0.1:41973, datanodeUuid=507ce806-07c9-4a82-aaaa-e69ed5a7a205, infoPort=42883, infoSecurePort=0, ipcPort=43901, storageInfo=lv=-57;cid=testClusterID;nsid=207589964;c=1730983935592) 2024-11-07T12:52:16,100 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6fe1ba734ee7ea4e with lease ID 0xbc4bfe0eb4309be5: from storage DS-41173e0c-72c7-4aef-9c9a-68ca170bc93c node DatanodeRegistration(127.0.0.1:41973, datanodeUuid=507ce806-07c9-4a82-aaaa-e69ed5a7a205, infoPort=42883, infoSecurePort=0, ipcPort=43901, storageInfo=lv=-57;cid=testClusterID;nsid=207589964;c=1730983935592), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:16,100 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6fe1ba734ee7ea4e with lease ID 0xbc4bfe0eb4309be5: Processing first storage report for DS-9f8ccd41-4533-425c-b64d-ccad9d27ebe3 from datanode DatanodeRegistration(127.0.0.1:41973, datanodeUuid=507ce806-07c9-4a82-aaaa-e69ed5a7a205, infoPort=42883, infoSecurePort=0, ipcPort=43901, storageInfo=lv=-57;cid=testClusterID;nsid=207589964;c=1730983935592) 2024-11-07T12:52:16,100 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6fe1ba734ee7ea4e with lease ID 0xbc4bfe0eb4309be5: from storage DS-9f8ccd41-4533-425c-b64d-ccad9d27ebe3 node DatanodeRegistration(127.0.0.1:41973, datanodeUuid=507ce806-07c9-4a82-aaaa-e69ed5a7a205, infoPort=42883, infoSecurePort=0, ipcPort=43901, storageInfo=lv=-57;cid=testClusterID;nsid=207589964;c=1730983935592), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:16,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18492d7d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/java.io.tmpdir/jetty-localhost-40021-hadoop-hdfs-3_4_1-tests_jar-_-any-466490938590209587/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:16,153 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30ebe7e3{HTTP/1.1, (http/1.1)}{localhost:40021} 2024-11-07T12:52:16,153 INFO [Time-limited test {}] server.Server(415): Started @101422ms 2024-11-07T12:52:16,155 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
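Everything from the property wiring above through the DataNode block reports is the standard mini-cluster bring-up that the HBase testing utility performs for a test. Purely for orientation, a minimal sketch of driving the same bring-up from test code follows; it uses the public HBaseTestingUtility class of HBase 2.x (the HBaseTestingUtil class logged above is its renamed successor on the master branch, so exact names may differ), and nothing below is taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // The utility generates a target/test-data/<uuid> directory and pushes the
    // dfs.*, nfs.*, fs.s3a.* and java.io.tmpdir settings seen in the log into
    // both system properties and the Configuration it hands out.
    HBaseTestingUtility util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    util.startMiniCluster();   // mini DFS + mini ZooKeeper + master + region server
    try {
      System.out.println("zk quorum: " + conf.get("hbase.zookeeper.quorum"));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}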
2024-11-07T12:52:16,242 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/data/data3/current/BP-973058567-172.17.0.2-1730983935592/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:16,243 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/data/data4/current/BP-973058567-172.17.0.2-1730983935592/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:16,260 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:52:16,263 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26f317983ed36158 with lease ID 0xbc4bfe0eb4309be6: Processing first storage report for DS-04ec031c-2046-4ede-b30d-f55401d6f6c7 from datanode DatanodeRegistration(127.0.0.1:46333, datanodeUuid=e50fc946-4ca3-46bd-b5ef-fe776876ee42, infoPort=45961, infoSecurePort=0, ipcPort=33913, storageInfo=lv=-57;cid=testClusterID;nsid=207589964;c=1730983935592) 2024-11-07T12:52:16,263 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26f317983ed36158 with lease ID 0xbc4bfe0eb4309be6: from storage DS-04ec031c-2046-4ede-b30d-f55401d6f6c7 node DatanodeRegistration(127.0.0.1:46333, datanodeUuid=e50fc946-4ca3-46bd-b5ef-fe776876ee42, infoPort=45961, infoSecurePort=0, ipcPort=33913, storageInfo=lv=-57;cid=testClusterID;nsid=207589964;c=1730983935592), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:16,263 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26f317983ed36158 with lease ID 0xbc4bfe0eb4309be6: Processing first storage report for DS-afded6ea-eb81-4a7b-8c57-085531996f28 from datanode DatanodeRegistration(127.0.0.1:46333, datanodeUuid=e50fc946-4ca3-46bd-b5ef-fe776876ee42, infoPort=45961, infoSecurePort=0, ipcPort=33913, storageInfo=lv=-57;cid=testClusterID;nsid=207589964;c=1730983935592) 2024-11-07T12:52:16,263 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26f317983ed36158 with lease ID 0xbc4bfe0eb4309be6: from storage DS-afded6ea-eb81-4a7b-8c57-085531996f28 node DatanodeRegistration(127.0.0.1:46333, datanodeUuid=e50fc946-4ca3-46bd-b5ef-fe776876ee42, infoPort=45961, infoSecurePort=0, ipcPort=33913, storageInfo=lv=-57;cid=testClusterID;nsid=207589964;c=1730983935592), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:16,283 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3 2024-11-07T12:52:16,286 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/zookeeper_0, clientPort=53058, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:52:16,287 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53058 2024-11-07T12:52:16,287 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:16,289 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:16,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:52:16,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:52:16,300 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544 with version=8 2024-11-07T12:52:16,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/hbase-staging 2024-11-07T12:52:16,303 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:52:16,303 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:16,304 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:16,304 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:52:16,304 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:16,304 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:52:16,304 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:52:16,304 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:52:16,305 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41851 2024-11-07T12:52:16,307 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41851 connecting to ZooKeeper ensemble=127.0.0.1:53058 2024-11-07T12:52:16,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:418510x0, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:52:16,313 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41851-0x1001a4ccbbc0000 connected 2024-11-07T12:52:16,328 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:16,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:16,332 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:16,332 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544, hbase.cluster.distributed=false 2024-11-07T12:52:16,334 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:52:16,334 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41851 2024-11-07T12:52:16,334 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41851 2024-11-07T12:52:16,335 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41851 2024-11-07T12:52:16,335 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41851 2024-11-07T12:52:16,335 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41851 2024-11-07T12:52:16,352 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:52:16,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:16,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:16,352 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:52:16,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:16,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:52:16,353 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:52:16,353 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:52:16,353 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37749 2024-11-07T12:52:16,355 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37749 connecting to ZooKeeper ensemble=127.0.0.1:53058 2024-11-07T12:52:16,355 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:16,358 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:16,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377490x0, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:52:16,363 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37749-0x1001a4ccbbc0001 connected 2024-11-07T12:52:16,363 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:16,364 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:52:16,364 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:52:16,365 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:52:16,366 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:52:16,367 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37749 2024-11-07T12:52:16,367 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37749 2024-11-07T12:52:16,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37749 2024-11-07T12:52:16,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37749 2024-11-07T12:52:16,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37749 
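At this point both the master (port 41851) and the region server (port 37749) have bound their NettyRpcServer endpoints, started their RPC handler pools, and registered with the ZooKeeper ensemble at 127.0.0.1:53058. As an illustration of how a client reaches such a cluster, the standard ConnectionFactory API can be pointed at that ensemble; in a real test the Configuration would come from the testing utility rather than the hard-coded quorum and client port used here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // ensemble host from the log
    conf.set("hbase.zookeeper.property.clientPort", "53058"); // MiniZooKeeperCluster client port
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The client locates the active master and region servers through the
      // /hbase znodes the servers are registering in the entries above.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}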
2024-11-07T12:52:16,382 DEBUG [M:0;db9ad1cb6cf9:41851 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db9ad1cb6cf9:41851 2024-11-07T12:52:16,382 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db9ad1cb6cf9,41851,1730983936303 2024-11-07T12:52:16,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:52:16,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:52:16,385 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db9ad1cb6cf9,41851,1730983936303 2024-11-07T12:52:16,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:52:16,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,387 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:52:16,388 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db9ad1cb6cf9,41851,1730983936303 from backup master directory 2024-11-07T12:52:16,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:52:16,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db9ad1cb6cf9,41851,1730983936303 2024-11-07T12:52:16,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:52:16,389 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
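The NodeCreated, NodeChildrenChanged and NodeDeleted events above are the master election playing out on the /hbase/master and /hbase/backup-masters znodes: the master registers as a backup, grabs /hbase/master, then removes its backup entry. As a sketch of what such watchers observe, the plain ZooKeeper client API (not HBase's internal ZKWatcher) can watch the same znode; the connect string reuses the ensemble address from the log and everything else is illustrative.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MasterWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      // NodeCreated on /hbase/master means a master won the election;
      // NodeDeleted means the active master went away.
      System.out.println("event: " + event.getType() + " on " + event.getPath());
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53058", 30000, watcher);
    connected.await();
    Stat stat = zk.exists("/hbase/master", true);   // sets a watch on the znode
    System.out.println("/hbase/master " + (stat == null ? "not yet created" : "present"));
    zk.close();
  }
}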
2024-11-07T12:52:16,389 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db9ad1cb6cf9,41851,1730983936303 2024-11-07T12:52:16,395 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/hbase.id] with ID: efe8e26f-6335-4c8f-8d61-3cd718853e17 2024-11-07T12:52:16,395 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/.tmp/hbase.id 2024-11-07T12:52:16,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:52:16,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:52:16,402 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/.tmp/hbase.id]:[hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/hbase.id] 2024-11-07T12:52:16,419 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:16,419 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:52:16,421 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
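The hbase.id handling above follows a write-to-temporary-then-rename pattern so that readers never observe a partially written cluster ID file. A generic sketch of that pattern with the Hadoop FileSystem API is shown below; the paths are placeholders and the real implementation lives in org.apache.hadoop.hbase.util.FSUtils.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();      // picks up fs.defaultFS
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/hbase/.tmp/hbase.id");   // temporary location
    Path dst = new Path("/hbase/hbase.id");        // final, visible location
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("efe8e26f-6335-4c8f-8d61-3cd718853e17".getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {                    // publish in a single step
      throw new IOException("could not move " + tmp + " to " + dst);
    }
  }
}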
2024-11-07T12:52:16,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:52:16,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:52:16,437 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:52:16,438 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:52:16,439 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:52:16,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:52:16,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:52:16,449 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store 2024-11-07T12:52:16,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:52:16,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:52:16,459 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:16,459 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:52:16,459 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:16,459 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:16,459 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:52:16,459 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:16,459 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
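The master:store descriptor dumped above spells out four column families (info, proc, rs, state), each with its own versions, block size, bloom filter and encoding settings. As an aid to reading those attribute dumps, this is how a family with the same attributes as 'info' would be expressed through the public descriptor builder API; the table name is a placeholder, not the internal master:store region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Mirrors VERSIONS => '3', IN_MEMORY => 'true', BLOCKSIZE => '8192',
    // BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8192)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))   // placeholder table name
        .setColumnFamily(info)
        .build();
    System.out.println(td);
  }
}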
2024-11-07T12:52:16,459 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730983936459Disabling compacts and flushes for region at 1730983936459Disabling writes for close at 1730983936459Writing region close event to WAL at 1730983936459Closed at 1730983936459 2024-11-07T12:52:16,461 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/.initializing 2024-11-07T12:52:16,461 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/WALs/db9ad1cb6cf9,41851,1730983936303 2024-11-07T12:52:16,464 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C41851%2C1730983936303, suffix=, logDir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/WALs/db9ad1cb6cf9,41851,1730983936303, archiveDir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/oldWALs, maxLogs=10 2024-11-07T12:52:16,465 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C41851%2C1730983936303.1730983936465 2024-11-07T12:52:16,470 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/WALs/db9ad1cb6cf9,41851,1730983936303/db9ad1cb6cf9%2C41851%2C1730983936303.1730983936465 2024-11-07T12:52:16,471 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45961:45961),(127.0.0.1/127.0.0.1:42883:42883)] 2024-11-07T12:52:16,472 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:52:16,472 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:16,472 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,472 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,475 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:52:16,476 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:16,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:16,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:52:16,479 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:16,479 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:52:16,479 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:52:16,482 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:16,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:52:16,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:52:16,484 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:16,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:52:16,484 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,485 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,486 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,487 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,487 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,488 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:52:16,489 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:16,492 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:52:16,492 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821634, jitterRate=0.04476267099380493}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:52:16,493 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730983936472Initializing all the Stores at 1730983936473 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983936473Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983936474 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983936474Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983936475 (+1 ms)Cleaning up temporary data from old regions at 1730983936487 (+12 ms)Region opened successfully at 1730983936493 (+6 ms) 2024-11-07T12:52:16,494 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:52:16,499 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@653cb19e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:52:16,500 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:52:16,500 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:52:16,500 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:52:16,500 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:52:16,501 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:52:16,502 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:52:16,502 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:52:16,507 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:52:16,508 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:52:16,509 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:52:16,510 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:52:16,511 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:52:16,512 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:52:16,512 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:52:16,514 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:52:16,515 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:52:16,516 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:52:16,517 
DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:52:16,519 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:52:16,521 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:52:16,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:16,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:16,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,523 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db9ad1cb6cf9,41851,1730983936303, sessionid=0x1001a4ccbbc0000, setting cluster-up flag (Was=false) 2024-11-07T12:52:16,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,531 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:52:16,532 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,41851,1730983936303 2024-11-07T12:52:16,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:16,540 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:52:16,541 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,41851,1730983936303 2024-11-07T12:52:16,543 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:52:16,545 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:52:16,545 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:52:16,545 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:52:16,545 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db9ad1cb6cf9,41851,1730983936303 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:52:16,547 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:52:16,547 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:52:16,547 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:52:16,547 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:52:16,547 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db9ad1cb6cf9:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:52:16,547 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,547 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:52:16,547 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:52:16,548 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730983966548 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,549 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:52:16,549 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:52:16,549 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:52:16,550 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:52:16,550 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:52:16,550 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983936550,5,FailOnTimeoutGroup] 2024-11-07T12:52:16,550 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983936550,5,FailOnTimeoutGroup] 2024-11-07T12:52:16,550 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:16,550 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
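The cleaner wiring above is a recurring pattern in this log: a DirScanPool with a fixed number of worker threads (1 for log_cleaner, 2 for hfile_cleaner) and a ScheduledChore that fires on a fixed period (600000 ms for both LogsCleaner and HFileCleaner). As a rough illustration of that pattern using only standard JDK classes, not HBase's own ChoreService/ScheduledChore API, the shape is a fixed-rate trigger feeding a small bounded pool:

    import java.util.concurrent.Executors;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class CleanerChoreSketch {
      public static void main(String[] args) {
        // Bounded worker pool, analogous to the hfile_cleaner DirScanPool of size 2.
        ThreadPoolExecutor scanPool = new ThreadPoolExecutor(
            2, 2, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

        // Fixed-period trigger, analogous to a chore with period=600000 ms.
        ScheduledExecutorService chore = Executors.newSingleThreadScheduledExecutor();
        chore.scheduleAtFixedRate(
            () -> scanPool.execute(() -> System.out.println("scan and delete expired files")),
            0, 600_000, TimeUnit.MILLISECONDS);
      }
    }

The split between trigger and pool is what lets a single chore schedule fan out to several cleaner plugins (TimeToLiveLogCleaner, ReplicationLogCleaner, SnapshotHFileCleaner, ...) without blocking the scheduler thread.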
2024-11-07T12:52:16,551 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:52:16,551 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,551 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,551 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:52:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:52:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:52:16,560 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:52:16,560 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544 2024-11-07T12:52:16,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:52:16,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:52:16,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:16,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:52:16,571 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(746): ClusterId : efe8e26f-6335-4c8f-8d61-3cd718853e17 2024-11-07T12:52:16,571 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:52:16,572 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:52:16,572 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:16,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:16,573 
INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:52:16,573 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:52:16,573 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:52:16,574 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:52:16,574 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:16,575 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:16,575 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:52:16,576 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:52:16,576 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c020847, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:52:16,577 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:52:16,577 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:16,577 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:16,578 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:52:16,579 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:52:16,579 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:16,580 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:16,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:52:16,581 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740 2024-11-07T12:52:16,581 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740 2024-11-07T12:52:16,583 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:52:16,583 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:52:16,584 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
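Twice in the span above the PEWorker reports "Found 0 recovered edits file(s)" under the meta region directory on HDFS; the check is essentially a directory listing against the region's recovered-edits path. A minimal sketch of that kind of check with the plain Hadoop FileSystem API (the NameNode address and region path are copied from the log; this is illustrative, not the HRegion code itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RecoveredEditsCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:34841");  // NameNode address from the log
        FileSystem fs = FileSystem.get(conf);

        // Region directory of hbase:meta (encoded name 1588230740), as printed above.
        Path regionDir = new Path("/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/"
            + "data/hbase/meta/1588230740");
        Path recoveredEdits = new Path(regionDir, "recovered.edits");

        // "Found 0 recovered edits file(s)" simply means this listing is empty (or the dir is absent).
        int found = fs.exists(recoveredEdits) ? fs.listStatus(recoveredEdits).length : 0;
        System.out.println("Found " + found + " recovered edits file(s) under " + regionDir);
      }
    }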
2024-11-07T12:52:16,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:52:16,588 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:52:16,588 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731070, jitterRate=-0.0703965276479721}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:52:16,589 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730983936568Initializing all the Stores at 1730983936569 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983936569Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983936569Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983936569Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983936569Cleaning up temporary data from old regions at 1730983936583 (+14 ms)Region opened successfully at 1730983936589 (+6 ms) 2024-11-07T12:52:16,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:52:16,590 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:52:16,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:52:16,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:52:16,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:52:16,590 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db9ad1cb6cf9:37749 2024-11-07T12:52:16,590 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:52:16,590 INFO [RS:0;db9ad1cb6cf9:37749 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:52:16,590 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-07T12:52:16,590 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:52:16,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730983936589Disabling compacts and flushes for region at 1730983936589Disabling writes for close at 1730983936590 (+1 ms)Writing region close event to WAL at 1730983936590Closed at 1730983936590 2024-11-07T12:52:16,591 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(2659): reportForDuty to master=db9ad1cb6cf9,41851,1730983936303 with port=37749, startcode=1730983936352 2024-11-07T12:52:16,591 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:52:16,592 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:52:16,592 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:52:16,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:52:16,594 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:52:16,595 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41867, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:52:16,595 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41851 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:16,596 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41851 {}] master.ServerManager(517): Registering regionserver=db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:16,597 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:52:16,598 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544 2024-11-07T12:52:16,598 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34841 2024-11-07T12:52:16,598 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:52:16,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:52:16,600 DEBUG 
[RS:0;db9ad1cb6cf9:37749 {}] zookeeper.ZKUtil(111): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:16,600 WARN [RS:0;db9ad1cb6cf9:37749 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:52:16,600 INFO [RS:0;db9ad1cb6cf9:37749 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:52:16,601 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/WALs/db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:16,601 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9ad1cb6cf9,37749,1730983936352] 2024-11-07T12:52:16,605 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:52:16,609 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:52:16,609 INFO [RS:0;db9ad1cb6cf9:37749 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:52:16,609 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,610 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:52:16,610 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:52:16,611 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
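The exchange above, where the region server sets a watcher on /hbase/rs/db9ad1cb6cf9,37749,1730983936352 and the master's RegionServerTracker reacts to the new ephemeral node, is plain ZooKeeper ephemeral-node registration. A stripped-down sketch with the raw ZooKeeper client (HBase actually goes through its ZKWatcher/ZKUtil wrappers; the quorum, base znode and server name below are taken from the log):

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsRegistrationSketch {
      public static void main(String[] args) throws Exception {
        // Quorum and base znode from the log above (127.0.0.1:53058, /hbase).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53058", 30_000, event -> { });

        // A region server announces itself with an ephemeral znode under /hbase/rs;
        // the node disappears automatically if the server's session dies.
        String serverName = "db9ad1cb6cf9,37749,1730983936352";
        zk.create("/hbase/rs/" + serverName, new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // The master side watches the children of /hbase/rs, which is what produces the
        // NodeChildrenChanged events and the "ephemeral node created" message above.
        List<String> live = zk.getChildren("/hbase/rs",
            (WatchedEvent event) -> System.out.println("rs list changed: " + event.getPath()));
        System.out.println("live region servers: " + live);
      }
    }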
2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:52:16,611 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:52:16,612 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,612 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,612 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,612 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
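A few lines above the executor list, the MemStoreFlusher reports globalMemStoreLimit=880 M with a low-water mark of 836 M. The two figures are related by a simple ratio: 836/880 = 0.95, which matches the commonly used default fraction for the global memstore lower limit (the exact configuration key and value are not printed in this log, so the multiplier here is an assumption). A small worked sketch of that arithmetic:

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long globalLimitMb = 880;          // globalMemStoreLimit from the log
        double lowerLimitFraction = 0.95;  // assumed fraction for the low-water mark

        long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);
        System.out.println("low-water mark = " + lowMarkMb + " M");  // 836 M, matching the log

        // Above 880 M the flusher forces flushes; once usage drops back under 836 M
        // it stops applying back-pressure to writes.
      }
    }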
2024-11-07T12:52:16,613 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,613 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,37749,1730983936352-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:52:16,632 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:52:16,632 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,37749,1730983936352-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,632 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,632 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.Replication(171): db9ad1cb6cf9,37749,1730983936352 started 2024-11-07T12:52:16,647 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:16,647 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(1482): Serving as db9ad1cb6cf9,37749,1730983936352, RpcServer on db9ad1cb6cf9/172.17.0.2:37749, sessionid=0x1001a4ccbbc0001 2024-11-07T12:52:16,648 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:52:16,648 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:16,648 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,37749,1730983936352' 2024-11-07T12:52:16,648 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:52:16,648 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:52:16,649 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:52:16,649 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:52:16,649 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:16,649 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,37749,1730983936352' 2024-11-07T12:52:16,649 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:52:16,650 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:52:16,650 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:52:16,650 INFO [RS:0;db9ad1cb6cf9:37749 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:52:16,650 INFO [RS:0;db9ad1cb6cf9:37749 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-07T12:52:16,747 WARN [db9ad1cb6cf9:41851 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-07T12:52:16,753 INFO [RS:0;db9ad1cb6cf9:37749 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C37749%2C1730983936352, suffix=, logDir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/WALs/db9ad1cb6cf9,37749,1730983936352, archiveDir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/oldWALs, maxLogs=32 2024-11-07T12:52:16,755 INFO [RS:0;db9ad1cb6cf9:37749 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C37749%2C1730983936352.1730983936754 2024-11-07T12:52:16,762 INFO [RS:0;db9ad1cb6cf9:37749 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/WALs/db9ad1cb6cf9,37749,1730983936352/db9ad1cb6cf9%2C37749%2C1730983936352.1730983936754 2024-11-07T12:52:16,762 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42883:42883),(127.0.0.1/127.0.0.1:45961:45961)] 2024-11-07T12:52:16,997 DEBUG [db9ad1cb6cf9:41851 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:52:16,998 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:17,000 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,37749,1730983936352, state=OPENING 2024-11-07T12:52:17,002 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:52:17,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:17,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:17,004 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:52:17,004 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:52:17,004 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:52:17,004 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,37749,1730983936352}] 2024-11-07T12:52:17,158 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:52:17,160 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40603, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:52:17,164 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:52:17,164 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:52:17,167 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C37749%2C1730983936352.meta, suffix=.meta, logDir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/WALs/db9ad1cb6cf9,37749,1730983936352, archiveDir=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/oldWALs, maxLogs=32 2024-11-07T12:52:17,169 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C37749%2C1730983936352.meta.1730983937168.meta 2024-11-07T12:52:17,175 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/WALs/db9ad1cb6cf9,37749,1730983936352/db9ad1cb6cf9%2C37749%2C1730983936352.meta.1730983937168.meta 2024-11-07T12:52:17,176 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45961:45961),(127.0.0.1/127.0.0.1:42883:42883)] 2024-11-07T12:52:17,176 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:52:17,177 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:52:17,177 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:52:17,177 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
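The WAL configuration lines show blocksize=256 MB and rollsize=128 MB for both the region server's default WAL and the meta WAL it is opening here. That 128 MB roll point is consistent with rolling at a fraction (commonly 0.5) of the HDFS block size so a single WAL file does not spill past one block; the multiplier below is an assumption for illustration, not a value printed in this log:

    public class WalRollSketch {
      public static void main(String[] args) {
        long blockSize = 256L * 1024 * 1024;  // blocksize=256 MB from the log
        double rollMultiplier = 0.5;          // assumed roll fraction of the block size
        long rollSize = (long) (blockSize * rollMultiplier);
        System.out.println("roll at " + rollSize + " bytes");  // 134217728 = 128 MB, matching rollsize

        long currentWalLength = 150L * 1024 * 1024;
        boolean requestRoll = currentWalLength > rollSize;  // a new WAL file would be started here
        System.out.println("roll requested: " + requestRoll);
      }
    }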
2024-11-07T12:52:17,177 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:52:17,177 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:17,177 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:52:17,177 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:52:17,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:52:17,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:52:17,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:17,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:17,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:52:17,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:52:17,182 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:17,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:17,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:52:17,184 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:52:17,184 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:17,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:17,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:52:17,186 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:52:17,186 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:17,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
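The descriptor printed earlier in this log and re-materialized store by store here (info, ns, rep_barrier and table, all ROW_INDEX_V1-encoded, ROWCOL bloom filters, in-memory, 8 KB block size except for the 64 KB rep_barrier family) can be expressed with the public client-side builders. The sketch below is an illustrative reconstruction from those printed attributes, not the internal code path FSTableDescriptors uses:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaDescriptorSketch {
      // One family with the attributes printed in the log: ROW_INDEX_V1 encoding,
      // ROWCOL bloom filter, in-memory, plus the given block size and max versions.
      static ColumnFamilyDescriptor family(String name, int maxVersions, int blocksize) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(maxVersions)
            .setInMemory(true)
            .setBlocksize(blocksize)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
      }

      public static void main(String[] args) throws Exception {
        TableDescriptor meta = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
            .setColumnFamily(family("info", 3, 8192))
            .setColumnFamily(family("ns", 3, 8192))
            .setColumnFamily(family("rep_barrier", Integer.MAX_VALUE, 65536))
            .setColumnFamily(family("table", 3, 8192))
            // The log shows MultiRowMutationEndpoint registered as a table coprocessor.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(meta);
      }
    }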
2024-11-07T12:52:17,186 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:52:17,187 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740 2024-11-07T12:52:17,189 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740 2024-11-07T12:52:17,190 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:52:17,190 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:52:17,191 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:52:17,192 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:52:17,193 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810469, jitterRate=0.030564814805984497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:52:17,193 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:52:17,194 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730983937177Writing region info on filesystem at 1730983937177Initializing all the Stores at 1730983937179 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983937179Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983937179Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983937179Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983937179Cleaning up temporary data from old regions at 1730983937190 (+11 ms)Running coprocessor post-open hooks at 1730983937193 (+3 ms)Region opened successfully at 1730983937194 (+1 ms) 2024-11-07T12:52:17,196 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730983937157 2024-11-07T12:52:17,199 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:52:17,199 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:52:17,200 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:17,201 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,37749,1730983936352, state=OPEN 2024-11-07T12:52:17,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:52:17,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:52:17,206 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:17,206 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:52:17,206 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:52:17,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:52:17,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,37749,1730983936352 in 202 msec 2024-11-07T12:52:17,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:52:17,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 617 msec 2024-11-07T12:52:17,214 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:52:17,214 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:52:17,215 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:52:17,215 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,37749,1730983936352, seqNum=-1] 2024-11-07T12:52:17,216 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:52:17,217 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46385, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:52:17,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 678 msec 2024-11-07T12:52:17,224 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730983937224, completionTime=-1 2024-11-07T12:52:17,224 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:52:17,224 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-07T12:52:17,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-07T12:52:17,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730983997226 2024-11-07T12:52:17,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730984057226 2024-11-07T12:52:17,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-07T12:52:17,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41851,1730983936303-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:17,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41851,1730983936303-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:17,226 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41851,1730983936303-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:17,227 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db9ad1cb6cf9:41851, period=300000, unit=MILLISECONDS is enabled. 
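To create the default and hbase namespaces, the PEWorker above first asks the connection registry for the hbase:meta location and then talks to the region server hosting it (db9ad1cb6cf9,37749,1730983936352, seqNum=-1). From an ordinary client the same lookup is available through a Connection and its RegionLocator; a short sketch, with the ZooKeeper quorum used throughout this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "53058");  // quorum port from the log

        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
          for (HRegionLocation location : locator.getAllRegionLocations()) {
            // Expected to print the single meta region hosted on db9ad1cb6cf9,37749,1730983936352.
            System.out.println(location.getRegion().getRegionNameAsString()
                + " -> " + location.getServerName());
          }
        }
      }
    }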
2024-11-07T12:52:17,227 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:17,227 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:17,229 DEBUG [master/db9ad1cb6cf9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:52:17,231 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.841sec 2024-11-07T12:52:17,231 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:52:17,232 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:52:17,232 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:52:17,232 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:52:17,232 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:52:17,232 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41851,1730983936303-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:52:17,232 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41851,1730983936303-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:52:17,234 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:52:17,235 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:52:17,235 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41851,1730983936303-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
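The ChoreService records above show the master registering its periodic background chores (BalancerChore, CatalogJanitor, HbckChore, and so on) with a name, period, and time unit. As a rough illustration only, the sketch below wires up a chore through the public ScheduledChore/ChoreService classes named in those records; the chore name, period, and stopper here are assumptions for the sketch, not values taken from this log.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreServiceSketch {
  public static void main(String[] args) {
    // Minimal stopper so the chore can be cancelled; real servers pass themselves.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ChoreService choreService = new ChoreService("sketch");

    // Illustrative chore: name and period are made up for this example.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60000) {
      @Override
      protected void chore() {
        // periodic work goes here
      }
    };

    // Scheduling is what produces the
    // "Chore ScheduledChore name=..., period=..., unit=... is enabled" records above.
    choreService.scheduleChore(chore);

    // Later, during shutdown, the service cancels its chores.
    choreService.shutdown();
  }
}
```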
2024-11-07T12:52:17,271 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@210f4ee9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:52:17,271 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db9ad1cb6cf9,41851,-1 for getting cluster id 2024-11-07T12:52:17,272 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:52:17,273 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'efe8e26f-6335-4c8f-8d61-3cd718853e17' 2024-11-07T12:52:17,274 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:52:17,274 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "efe8e26f-6335-4c8f-8d61-3cd718853e17" 2024-11-07T12:52:17,275 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8f8429, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:52:17,275 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db9ad1cb6cf9,41851,-1] 2024-11-07T12:52:17,275 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:52:17,276 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:17,277 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56362, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:52:17,278 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bf88cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:52:17,279 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:52:17,280 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,37749,1730983936352, seqNum=-1] 2024-11-07T12:52:17,280 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:52:17,282 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51510, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:52:17,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db9ad1cb6cf9,41851,1730983936303 2024-11-07T12:52:17,285 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:17,288 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:52:17,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:52:17,289 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:52:17,289 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:17,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:17,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:17,289 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-07T12:52:17,289 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:52:17,289 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=131514615, stopped=false 2024-11-07T12:52:17,289 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db9ad1cb6cf9,41851,1730983936303 2024-11-07T12:52:17,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:17,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:17,291 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:52:17,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:17,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:17,291 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-07T12:52:17,292 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:17,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:17,292 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:17,292 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:17,292 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db9ad1cb6cf9,37749,1730983936352' ***** 2024-11-07T12:52:17,292 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:52:17,293 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:52:17,293 INFO [RS:0;db9ad1cb6cf9:37749 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:52:17,293 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:52:17,293 INFO [RS:0;db9ad1cb6cf9:37749 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:52:17,293 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(959): stopping server db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:17,293 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:52:17,293 INFO [RS:0;db9ad1cb6cf9:37749 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db9ad1cb6cf9:37749. 2024-11-07T12:52:17,293 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:17,293 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:17,293 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
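The call stacks in this shutdown sequence run through AbstractTestLogRolling.tearDown and HBaseTestingUtil.shutdownMiniCluster. A minimal, hedged sketch of what that teardown amounts to is shown below; the class and field names are illustrative, and the no-arg HBaseTestingUtil constructor is an assumption rather than something copied from the test source.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public abstract class LogRollingTearDownSketch {
  // Assumed to have been started earlier in the test with startMiniCluster(...).
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the client connection, stops the master and region server, then the
    // mini DFS and ZooKeeper clusters, producing shutdown records like the ones here.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```

JUnit's RunAfters, visible in the stack traces, is what invokes this @After method once the test body (testLogRollOnDatanodeDeath) has finished.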
2024-11-07T12:52:17,293 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:52:17,293 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T12:52:17,294 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:52:17,294 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-07T12:52:17,294 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-07T12:52:17,294 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-07T12:52:17,294 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:52:17,294 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:52:17,294 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:52:17,294 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:52:17,294 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:52:17,294 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-07T12:52:17,320 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740/.tmp/ns/e17aef76798e4e49aae3baae8d49cc6d is 43, key is default/ns:d/1730983937218/Put/seqid=0 2024-11-07T12:52:17,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741835_1011 (size=5153) 2024-11-07T12:52:17,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741835_1011 (size=5153) 2024-11-07T12:52:17,327 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740/.tmp/ns/e17aef76798e4e49aae3baae8d49cc6d 2024-11-07T12:52:17,335 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740/.tmp/ns/e17aef76798e4e49aae3baae8d49cc6d as hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740/ns/e17aef76798e4e49aae3baae8d49cc6d 2024-11-07T12:52:17,342 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740/ns/e17aef76798e4e49aae3baae8d49cc6d, entries=2, sequenceid=6, filesize=5.0 K 2024-11-07T12:52:17,344 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false 2024-11-07T12:52:17,349 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-07T12:52:17,350 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:52:17,350 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:52:17,350 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730983937294Running coprocessor pre-close hooks at 1730983937294Disabling compacts and flushes for region at 1730983937294Disabling writes for close at 1730983937294Obtaining lock to block concurrent updates at 1730983937294Preparing flush snapshotting stores in 1588230740 at 1730983937294Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1730983937295 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1730983937296 (+1 ms)Flushing 1588230740/ns: creating writer at 1730983937296Flushing 1588230740/ns: appending metadata at 1730983937320 (+24 ms)Flushing 1588230740/ns: closing flushed file at 1730983937320Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3294e25b: reopening flushed file at 1730983937334 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false at 1730983937344 (+10 ms)Writing region close event to WAL at 1730983937345 (+1 ms)Running coprocessor post-close hooks at 1730983937349 (+4 ms)Closed at 1730983937350 (+1 ms) 2024-11-07T12:52:17,350 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:52:17,494 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(976): stopping server db9ad1cb6cf9,37749,1730983936352; all regions closed. 
2024-11-07T12:52:17,495 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,495 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,495 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,495 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,495 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741834_1010 (size=1152) 2024-11-07T12:52:17,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741834_1010 (size=1152) 2024-11-07T12:52:17,501 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/oldWALs 2024-11-07T12:52:17,501 INFO [RS:0;db9ad1cb6cf9:37749 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C37749%2C1730983936352.meta:.meta(num 1730983937168) 2024-11-07T12:52:17,502 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,502 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,502 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,502 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,502 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:17,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741833_1009 (size=93) 2024-11-07T12:52:17,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741833_1009 (size=93) 2024-11-07T12:52:17,613 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T12:52:17,613 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T12:52:17,908 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/oldWALs 2024-11-07T12:52:17,908 INFO [RS:0;db9ad1cb6cf9:37749 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C37749%2C1730983936352:(num 1730983936754) 2024-11-07T12:52:17,908 DEBUG [RS:0;db9ad1cb6cf9:37749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:17,908 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:52:17,908 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:52:17,908 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.ChoreService(370): Chore service for: regionserver/db9ad1cb6cf9:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T12:52:17,908 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:52:17,909 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
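The records above show the region server's FSHLog being closed and its WAL files archived to oldWALs; the enclosing test, TestLogRolling, exercises WAL rolling. As a hedged sketch only, forcing a roll through the public Admin API could look like the following; the connection setup is generic, and the server name is simply the one reported in this log, used here as an example.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region server seen in this log to roll its WAL; the replaced
      // WAL file is later moved under oldWALs, as in the records above.
      admin.rollWALWriter(ServerName.valueOf("db9ad1cb6cf9,37749,1730983936352"));
    }
  }
}
```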
2024-11-07T12:52:17,909 INFO [RS:0;db9ad1cb6cf9:37749 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37749 2024-11-07T12:52:17,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9ad1cb6cf9,37749,1730983936352 2024-11-07T12:52:17,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:52:17,911 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:52:17,912 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9ad1cb6cf9,37749,1730983936352] 2024-11-07T12:52:17,914 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db9ad1cb6cf9,37749,1730983936352 already deleted, retry=false 2024-11-07T12:52:17,914 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db9ad1cb6cf9,37749,1730983936352 expired; onlineServers=0 2024-11-07T12:52:17,914 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db9ad1cb6cf9,41851,1730983936303' ***** 2024-11-07T12:52:17,914 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:52:17,914 INFO [M:0;db9ad1cb6cf9:41851 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:52:17,914 INFO [M:0;db9ad1cb6cf9:41851 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:52:17,914 DEBUG [M:0;db9ad1cb6cf9:41851 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:52:17,914 DEBUG [M:0;db9ad1cb6cf9:41851 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:52:17,914 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:52:17,914 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983936550 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983936550,5,FailOnTimeoutGroup] 2024-11-07T12:52:17,914 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983936550 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983936550,5,FailOnTimeoutGroup] 2024-11-07T12:52:17,915 INFO [M:0;db9ad1cb6cf9:41851 {}] hbase.ChoreService(370): Chore service for: master/db9ad1cb6cf9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:52:17,915 INFO [M:0;db9ad1cb6cf9:41851 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:52:17,915 DEBUG [M:0;db9ad1cb6cf9:41851 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:52:17,915 INFO [M:0;db9ad1cb6cf9:41851 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:52:17,915 INFO [M:0;db9ad1cb6cf9:41851 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:52:17,915 INFO [M:0;db9ad1cb6cf9:41851 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:52:17,915 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:52:17,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:52:17,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:17,916 DEBUG [M:0;db9ad1cb6cf9:41851 {}] zookeeper.ZKUtil(347): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:52:17,916 WARN [M:0;db9ad1cb6cf9:41851 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:52:17,917 INFO [M:0;db9ad1cb6cf9:41851 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/.lastflushedseqids 2024-11-07T12:52:17,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741836_1012 (size=99) 2024-11-07T12:52:17,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741836_1012 (size=99) 2024-11-07T12:52:17,924 INFO [M:0;db9ad1cb6cf9:41851 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:52:17,924 INFO [M:0;db9ad1cb6cf9:41851 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:52:17,924 DEBUG [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:52:17,925 INFO [M:0;db9ad1cb6cf9:41851 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:17,925 DEBUG [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:17,925 DEBUG [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:52:17,925 DEBUG [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:17,925 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-07T12:52:17,943 DEBUG [M:0;db9ad1cb6cf9:41851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2479fa14061f4729b2ba1007182b7471 is 82, key is hbase:meta,,1/info:regioninfo/1730983937200/Put/seqid=0 2024-11-07T12:52:17,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741837_1013 (size=5672) 2024-11-07T12:52:17,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741837_1013 (size=5672) 2024-11-07T12:52:17,949 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2479fa14061f4729b2ba1007182b7471 2024-11-07T12:52:17,980 DEBUG [M:0;db9ad1cb6cf9:41851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/13543389e8b04aa8ab878b11b62e75fb is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1730983937223/Put/seqid=0 2024-11-07T12:52:17,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741838_1014 (size=5275) 2024-11-07T12:52:17,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741838_1014 (size=5275) 2024-11-07T12:52:17,987 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/13543389e8b04aa8ab878b11b62e75fb 2024-11-07T12:52:18,009 DEBUG [M:0;db9ad1cb6cf9:41851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/acb02dc3ddf74ec98787ef759d890d69 is 69, key is db9ad1cb6cf9,37749,1730983936352/rs:state/1730983936596/Put/seqid=0 2024-11-07T12:52:18,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=Closed, path=null 2024-11-07T12:52:18,013 INFO [RS:0;db9ad1cb6cf9:37749 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:52:18,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x1001a4ccbbc0001, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:52:18,013 INFO [RS:0;db9ad1cb6cf9:37749 {}] regionserver.HRegionServer(1031): Exiting; stopping=db9ad1cb6cf9,37749,1730983936352; zookeeper connection closed. 2024-11-07T12:52:18,013 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6eb10c59 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6eb10c59 2024-11-07T12:52:18,013 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:52:18,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741839_1015 (size=5156) 2024-11-07T12:52:18,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741839_1015 (size=5156) 2024-11-07T12:52:18,015 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/acb02dc3ddf74ec98787ef759d890d69 2024-11-07T12:52:18,036 DEBUG [M:0;db9ad1cb6cf9:41851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c390227df1b348f9b036c8203646abb4 is 52, key is load_balancer_on/state:d/1730983937287/Put/seqid=0 2024-11-07T12:52:18,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741840_1016 (size=5056) 2024-11-07T12:52:18,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741840_1016 (size=5056) 2024-11-07T12:52:18,041 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c390227df1b348f9b036c8203646abb4 2024-11-07T12:52:18,047 DEBUG [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2479fa14061f4729b2ba1007182b7471 as hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2479fa14061f4729b2ba1007182b7471 2024-11-07T12:52:18,053 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2479fa14061f4729b2ba1007182b7471, entries=8, sequenceid=29, filesize=5.5 K 2024-11-07T12:52:18,054 DEBUG [M:0;db9ad1cb6cf9:41851 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/13543389e8b04aa8ab878b11b62e75fb as hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/13543389e8b04aa8ab878b11b62e75fb 2024-11-07T12:52:18,059 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/13543389e8b04aa8ab878b11b62e75fb, entries=3, sequenceid=29, filesize=5.2 K 2024-11-07T12:52:18,060 DEBUG [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/acb02dc3ddf74ec98787ef759d890d69 as hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/acb02dc3ddf74ec98787ef759d890d69 2024-11-07T12:52:18,066 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/acb02dc3ddf74ec98787ef759d890d69, entries=1, sequenceid=29, filesize=5.0 K 2024-11-07T12:52:18,067 DEBUG [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c390227df1b348f9b036c8203646abb4 as hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c390227df1b348f9b036c8203646abb4 2024-11-07T12:52:18,072 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34841/user/jenkins/test-data/c5160e35-83d8-7530-0857-6ff0a1ee7544/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c390227df1b348f9b036c8203646abb4, entries=1, sequenceid=29, filesize=4.9 K 2024-11-07T12:52:18,073 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false 2024-11-07T12:52:18,075 INFO [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:18,075 DEBUG [M:0;db9ad1cb6cf9:41851 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730983937924Disabling compacts and flushes for region at 1730983937924Disabling writes for close at 1730983937925 (+1 ms)Obtaining lock to block concurrent updates at 1730983937925Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730983937925Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1730983937925Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1730983937926 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730983937926Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730983937942 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730983937942Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730983937957 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730983937980 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730983937980Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730983937993 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730983938008 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730983938008Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730983938020 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730983938035 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730983938035Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4623264b: reopening flushed file at 1730983938046 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a96ad89: reopening flushed file at 1730983938053 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c4119b2: reopening flushed file at 1730983938059 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11dd0657: reopening flushed file at 1730983938066 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false at 1730983938073 (+7 ms)Writing region close event to WAL at 1730983938075 (+2 ms)Closed at 1730983938075 2024-11-07T12:52:18,075 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:18,075 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:18,075 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:18,076 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:18,076 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:18,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741830_1006 (size=10311) 2024-11-07T12:52:18,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46333 is added to blk_1073741830_1006 (size=10311) 2024-11-07T12:52:18,080 INFO [M:0;db9ad1cb6cf9:41851 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:52:18,080 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:52:18,080 INFO [M:0;db9ad1cb6cf9:41851 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41851 2024-11-07T12:52:18,080 INFO [M:0;db9ad1cb6cf9:41851 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:52:18,183 INFO [M:0;db9ad1cb6cf9:41851 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:52:18,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:52:18,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41851-0x1001a4ccbbc0000, quorum=127.0.0.1:53058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:52:18,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18492d7d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:18,187 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30ebe7e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:52:18,187 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:52:18,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@274298f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:52:18,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@194f043a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.log.dir/,STOPPED} 2024-11-07T12:52:18,189 WARN [BP-973058567-172.17.0.2-1730983935592 heartbeating to localhost/127.0.0.1:34841 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:52:18,189 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:52:18,189 WARN [BP-973058567-172.17.0.2-1730983935592 heartbeating to localhost/127.0.0.1:34841 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-973058567-172.17.0.2-1730983935592 (Datanode Uuid e50fc946-4ca3-46bd-b5ef-fe776876ee42) service to localhost/127.0.0.1:34841 2024-11-07T12:52:18,189 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:52:18,190 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/data/data3/current/BP-973058567-172.17.0.2-1730983935592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:18,190 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/data/data4/current/BP-973058567-172.17.0.2-1730983935592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:18,191 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:52:18,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a15ed6a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:18,193 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@b5aedfa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:52:18,193 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:52:18,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@469dec96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:52:18,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a0f3c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.log.dir/,STOPPED} 2024-11-07T12:52:18,195 WARN [BP-973058567-172.17.0.2-1730983935592 heartbeating to localhost/127.0.0.1:34841 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:52:18,195 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:52:18,195 WARN [BP-973058567-172.17.0.2-1730983935592 heartbeating to localhost/127.0.0.1:34841 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-973058567-172.17.0.2-1730983935592 (Datanode Uuid 507ce806-07c9-4a82-aaaa-e69ed5a7a205) service to localhost/127.0.0.1:34841 2024-11-07T12:52:18,195 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:52:18,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/data/data1/current/BP-973058567-172.17.0.2-1730983935592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:18,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/cluster_650ce62c-94fb-5950-fddf-7d7a279eeef4/data/data2/current/BP-973058567-172.17.0.2-1730983935592 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:18,197 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:52:18,204 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b8ef2ff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:52:18,204 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10885b70{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:52:18,205 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:52:18,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f681677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:52:18,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59505eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.log.dir/,STOPPED} 2024-11-07T12:52:18,213 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:52:18,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:52:18,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:52:18,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.log.dir so I do NOT create it in target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a 2024-11-07T12:52:18,229 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/32b22f56-f021-f66b-48d4-717743c15ae3/hadoop.tmp.dir so I do NOT create it in target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a 2024-11-07T12:52:18,229 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3, deleteOnExit=true 2024-11-07T12:52:18,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:52:18,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/test.cache.data in system properties and HBase conf 2024-11-07T12:52:18,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:52:18,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:52:18,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:52:18,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:52:18,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:52:18,230 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-07T12:52:18,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:52:18,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:52:18,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:52:18,246 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:52:18,315 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:18,320 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:18,324 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:18,324 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:18,324 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:52:18,324 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:18,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@142d24a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:18,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42b52d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:18,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c00ef51{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/java.io.tmpdir/jetty-localhost-45273-hadoop-hdfs-3_4_1-tests_jar-_-any-2769653150378684975/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:52:18,443 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73b9709e{HTTP/1.1, (http/1.1)}{localhost:45273} 2024-11-07T12:52:18,443 INFO [Time-limited test {}] server.Server(415): Started @103712ms 2024-11-07T12:52:18,457 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:52:18,523 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:18,527 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:18,528 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:18,528 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:18,528 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:52:18,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65dec1b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:18,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a107105{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:18,617 INFO [regionserver/db9ad1cb6cf9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:52:18,643 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de86657{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/java.io.tmpdir/jetty-localhost-37891-hadoop-hdfs-3_4_1-tests_jar-_-any-16618544251121567828/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:18,644 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6787773a{HTTP/1.1, (http/1.1)}{localhost:37891} 2024-11-07T12:52:18,644 INFO [Time-limited test {}] server.Server(415): Started @103913ms 2024-11-07T12:52:18,645 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:52:18,678 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:18,681 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:18,682 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:18,682 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:18,682 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:52:18,682 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66182b08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:18,683 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eee535{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:18,733 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data1/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:18,733 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data2/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:18,750 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:52:18,753 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f9aae376c7bd694 with lease ID 0x780a2b110f549ce3: Processing first storage report for DS-b92cddd8-6d84-42d7-884f-ee397d4786cd from datanode DatanodeRegistration(127.0.0.1:39857, datanodeUuid=39460e06-c321-4be7-b8ee-764203916adf, infoPort=38363, infoSecurePort=0, ipcPort=36397, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:18,753 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f9aae376c7bd694 with lease ID 0x780a2b110f549ce3: from storage DS-b92cddd8-6d84-42d7-884f-ee397d4786cd node DatanodeRegistration(127.0.0.1:39857, datanodeUuid=39460e06-c321-4be7-b8ee-764203916adf, infoPort=38363, infoSecurePort=0, ipcPort=36397, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:18,754 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f9aae376c7bd694 with lease ID 0x780a2b110f549ce3: Processing first storage report for DS-1854560d-5e7c-416c-972e-78553406e59e from datanode DatanodeRegistration(127.0.0.1:39857, datanodeUuid=39460e06-c321-4be7-b8ee-764203916adf, infoPort=38363, infoSecurePort=0, ipcPort=36397, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:18,754 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f9aae376c7bd694 with lease ID 0x780a2b110f549ce3: from storage DS-1854560d-5e7c-416c-972e-78553406e59e node DatanodeRegistration(127.0.0.1:39857, datanodeUuid=39460e06-c321-4be7-b8ee-764203916adf, infoPort=38363, infoSecurePort=0, ipcPort=36397, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:18,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f2859b3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/java.io.tmpdir/jetty-localhost-40113-hadoop-hdfs-3_4_1-tests_jar-_-any-699259020340977814/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:18,800 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b918d2a{HTTP/1.1, (http/1.1)}{localhost:40113} 2024-11-07T12:52:18,800 INFO [Time-limited test {}] server.Server(415): Started @104069ms 2024-11-07T12:52:18,801 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
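The entries above record HBaseTestingUtil shutting down one minicluster and starting another with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, after which the mini HDFS NameNode, its two DataNodes and their Jetty endpoints come up. For reference, a minimal sketch of the test-side call that produces a startup sequence like this, assuming the public HBase test classes named in these entries (HBaseTestingUtil, StartMiniClusterOption); this is illustrative and not code taken from this run:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Owns the mini HDFS, ZooKeeper and HBase daemons whose startup is logged here.
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option string printed by HBaseTestingUtil(805):
    // 1 master, 1 region server, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);
    try {
      // test body would run against the minicluster here
    } finally {
      util.shutdownMiniCluster();  // emits the "Minicluster is down" entry
    }
  }
}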
2024-11-07T12:52:18,886 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data3/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:18,887 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data4/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:18,904 WARN [Thread-670 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:52:18,906 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9352b8c17ac681c4 with lease ID 0x780a2b110f549ce4: Processing first storage report for DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899 from datanode DatanodeRegistration(127.0.0.1:34709, datanodeUuid=732f757f-6769-4bb3-bc12-fa7b328c287d, infoPort=46465, infoSecurePort=0, ipcPort=43057, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:18,906 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9352b8c17ac681c4 with lease ID 0x780a2b110f549ce4: from storage DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899 node DatanodeRegistration(127.0.0.1:34709, datanodeUuid=732f757f-6769-4bb3-bc12-fa7b328c287d, infoPort=46465, infoSecurePort=0, ipcPort=43057, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:18,906 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9352b8c17ac681c4 with lease ID 0x780a2b110f549ce4: Processing first storage report for DS-9af7146e-b0f6-435b-bc0d-d4eac6f6348f from datanode DatanodeRegistration(127.0.0.1:34709, datanodeUuid=732f757f-6769-4bb3-bc12-fa7b328c287d, infoPort=46465, infoSecurePort=0, ipcPort=43057, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:18,906 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9352b8c17ac681c4 with lease ID 0x780a2b110f549ce4: from storage DS-9af7146e-b0f6-435b-bc0d-d4eac6f6348f node DatanodeRegistration(127.0.0.1:34709, datanodeUuid=732f757f-6769-4bb3-bc12-fa7b328c287d, infoPort=46465, infoSecurePort=0, ipcPort=43057, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:18,927 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a 2024-11-07T12:52:18,930 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/zookeeper_0, clientPort=58729, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:52:18,931 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58729 2024-11-07T12:52:18,931 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:18,933 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:18,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34709 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:52:18,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39857 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:52:18,944 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201 with version=8 2024-11-07T12:52:18,944 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/hbase-staging 2024-11-07T12:52:18,946 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:52:18,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:18,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:18,946 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:52:18,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:18,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:52:18,947 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:52:18,947 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:52:18,947 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42273 2024-11-07T12:52:18,949 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42273 connecting to ZooKeeper ensemble=127.0.0.1:58729 2024-11-07T12:52:18,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:422730x0, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:52:18,955 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42273-0x1001a4cd6100000 connected 2024-11-07T12:52:18,970 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:18,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:18,973 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:18,974 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201, hbase.cluster.distributed=false 2024-11-07T12:52:18,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:52:18,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42273 2024-11-07T12:52:18,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42273 2024-11-07T12:52:18,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42273 2024-11-07T12:52:18,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42273 2024-11-07T12:52:18,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42273 2024-11-07T12:52:18,993 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:52:18,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:18,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:18,993 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:52:18,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:18,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:52:18,993 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:52:18,993 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:52:18,994 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42011 2024-11-07T12:52:18,995 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42011 connecting to ZooKeeper ensemble=127.0.0.1:58729 2024-11-07T12:52:18,996 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:18,998 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:19,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:420110x0, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:52:19,003 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:420110x0, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:19,003 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42011-0x1001a4cd6100001 connected 2024-11-07T12:52:19,003 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:52:19,004 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:52:19,004 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:52:19,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:52:19,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42011 2024-11-07T12:52:19,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42011 2024-11-07T12:52:19,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42011 2024-11-07T12:52:19,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42011 2024-11-07T12:52:19,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42011 2024-11-07T12:52:19,022 
DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db9ad1cb6cf9:42273 2024-11-07T12:52:19,022 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db9ad1cb6cf9,42273,1730983938946 2024-11-07T12:52:19,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:52:19,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:52:19,024 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db9ad1cb6cf9,42273,1730983938946 2024-11-07T12:52:19,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:52:19,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,026 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:52:19,027 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db9ad1cb6cf9,42273,1730983938946 from backup master directory 2024-11-07T12:52:19,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:52:19,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db9ad1cb6cf9,42273,1730983938946 2024-11-07T12:52:19,028 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T12:52:19,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:52:19,028 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db9ad1cb6cf9,42273,1730983938946 2024-11-07T12:52:19,033 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/hbase.id] with ID: c5ff3d34-b209-493c-8806-713447c6ab20 2024-11-07T12:52:19,033 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/.tmp/hbase.id 2024-11-07T12:52:19,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34709 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:52:19,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39857 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:52:19,040 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/.tmp/hbase.id]:[hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/hbase.id] 2024-11-07T12:52:19,053 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:19,053 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:52:19,054 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
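At this point the master db9ad1cb6cf9,42273,1730983938946 has registered itself as active in ZooKeeper and written the cluster ID file (hbase.id). A test would typically confirm this through the client API rather than by reading the log; the following is a hedged sketch using the public Admin and ClusterMetrics interfaces, with the HBaseTestingUtil instance from the earlier sketch passed in as an assumption:

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.client.Admin;

public class ClusterStatusSketch {
  // "util" is assumed to be the HBaseTestingUtil that started the minicluster above.
  static void printClusterIdentity(HBaseTestingUtil util) throws Exception {
    try (Admin admin = util.getConnection().getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      // Expected to match the active master registered in the entries above
      // and the ID written to .../hbase.id.
      System.out.println("active master = " + metrics.getMasterName());
      System.out.println("cluster id    = " + metrics.getClusterId());
    }
  }
}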
2024-11-07T12:52:19,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34709 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:52:19,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39857 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:52:19,065 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:52:19,066 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:52:19,066 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:52:19,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34709 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:52:19,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39857 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:52:19,075 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store 2024-11-07T12:52:19,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34709 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:52:19,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39857 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:52:19,084 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:19,084 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:52:19,084 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:19,084 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:19,084 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:52:19,084 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:52:19,084 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
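The descriptor logged above for the master's local 'master:store' region lists four column families with per-family attributes; the 'info' family uses ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, in-memory caching and an 8 KB block size. As a point of reference, a sketch of how a comparable family would be declared through the public descriptor builder API; the table name 'example' and the choice to model only the 'info' family are illustrative assumptions, not anything created by this run:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  static TableDescriptor exampleDescriptor() {
    // Attribute values mirror what the log reports for master:store's 'info' family.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8 KB
            .build())
        .build();
  }
}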
2024-11-07T12:52:19,084 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730983939084Disabling compacts and flushes for region at 1730983939084Disabling writes for close at 1730983939084Writing region close event to WAL at 1730983939084Closed at 1730983939084 2024-11-07T12:52:19,085 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/.initializing 2024-11-07T12:52:19,085 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946 2024-11-07T12:52:19,088 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C42273%2C1730983938946, suffix=, logDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946, archiveDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/oldWALs, maxLogs=10 2024-11-07T12:52:19,089 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 2024-11-07T12:52:19,095 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 2024-11-07T12:52:19,095 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38363:38363),(127.0.0.1/127.0.0.1:46465:46465)] 2024-11-07T12:52:19,096 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:52:19,096 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:19,096 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,097 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,102 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:52:19,102 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:19,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:52:19,104 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:52:19,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:52:19,106 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,107 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:52:19,107 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,108 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:52:19,108 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,109 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:52:19,109 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,109 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,110 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,111 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,111 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,112 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:52:19,113 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:52:19,116 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:52:19,116 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720834, jitterRate=-0.0834130346775055}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:52:19,117 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730983939097Initializing all the Stores at 1730983939098 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983939098Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983939100 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983939100Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983939100Cleaning up temporary data from old regions at 1730983939111 (+11 ms)Region opened successfully at 1730983939117 (+6 ms) 2024-11-07T12:52:19,117 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:52:19,122 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14d3f444, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:52:19,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:52:19,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:52:19,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:52:19,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:52:19,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:52:19,124 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:52:19,124 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:52:19,126 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:52:19,127 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:52:19,128 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:52:19,129 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:52:19,129 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:52:19,130 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:52:19,131 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:52:19,132 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:52:19,133 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:52:19,134 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:52:19,135 DEBUG 
[master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:52:19,137 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:52:19,138 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:52:19,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:19,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:19,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,140 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db9ad1cb6cf9,42273,1730983938946, sessionid=0x1001a4cd6100000, setting cluster-up flag (Was=false) 2024-11-07T12:52:19,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,148 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:52:19,149 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,42273,1730983938946 2024-11-07T12:52:19,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,157 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:52:19,158 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,42273,1730983938946 2024-11-07T12:52:19,160 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:52:19,162 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:52:19,162 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:52:19,162 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:52:19,163 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db9ad1cb6cf9,42273,1730983938946 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:52:19,164 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:52:19,164 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:52:19,164 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:52:19,164 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:52:19,164 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db9ad1cb6cf9:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:52:19,164 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,164 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:52:19,164 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:52:19,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730983969166 2024-11-07T12:52:19,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:52:19,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:52:19,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:52:19,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:52:19,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:52:19,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:52:19,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,166 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:52:19,167 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:52:19,167 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:52:19,167 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:52:19,167 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:52:19,167 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:52:19,167 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:52:19,168 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983939167,5,FailOnTimeoutGroup] 2024-11-07T12:52:19,168 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983939168,5,FailOnTimeoutGroup] 2024-11-07T12:52:19,168 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
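The cleaner and chore entries in this stretch of the log (CompletedProcedureCleaner with timeout=30000, LogsCleaner and HFileCleaner at period=600000 ms, later ReplicationBarrierCleaner and SnapshotCleaner) all record the same periodic-chore pattern: a named task re-run at a fixed period. A minimal sketch of that pattern, using the plain JDK scheduler rather than HBase's own ChoreService/ScheduledChore classes; only the names and periods come from the log, the task bodies are placeholders:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) {
            // One scheduler thread stands in for the master's chore service.
            ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();

            // "Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled."
            chorePool.scheduleAtFixedRate(() -> System.out.println("LogsCleaner pass"),
                0, 600_000, TimeUnit.MILLISECONDS);

            // "Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled."
            chorePool.scheduleAtFixedRate(() -> System.out.println("HFileCleaner pass"),
                0, 600_000, TimeUnit.MILLISECONDS);
        }
    }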
2024-11-07T12:52:19,168 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,168 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:52:19,168 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,168 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,168 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:52:19,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34709 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:52:19,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39857 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:52:19,179 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:52:19,179 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201 2024-11-07T12:52:19,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34709 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:52:19,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39857 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:52:19,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:19,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:52:19,194 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:52:19,194 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:19,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:52:19,197 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:52:19,197 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:19,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:52:19,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:52:19,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:19,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:52:19,201 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:52:19,202 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:19,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:52:19,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740 2024-11-07T12:52:19,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740 2024-11-07T12:52:19,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:52:19,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:52:19,206 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
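The two FlushLargeStoresPolicy entries (32.0 M for the master:store region earlier, 16.0 M for hbase:meta just above) record the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor: the region's memstore flush heap size divided by its number of column families. The logged numbers line up with that rule; a short check of the arithmetic, where the 64 MB figure for hbase:meta is inferred from the 16.0 M result rather than read directly from the log:

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // master:store: flushSize=134217728 (from the MasterRegionFlusherAndCompactor line)
            // and 4 column families (info, proc, rs, state).
            long masterStoreLowerBound = 134217728L / 4;  // 33554432 = 32 MB, matching flushSizeLowerBound=33554432

            // hbase:meta: 4 families (info, ns, rep_barrier, table); a 16 MB fallback implies a
            // 64 MB memstore flush heap size for this region in this test setup (inferred).
            long metaLowerBound = 67108864L / 4;          // 16777216 = 16 MB, matching flushSizeLowerBound=16777216

            System.out.println(masterStoreLowerBound + " " + metaLowerBound);
        }
    }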
2024-11-07T12:52:19,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:52:19,210 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:52:19,210 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794060, jitterRate=0.009699612855911255}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:52:19,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730983939190Initializing all the Stores at 1730983939191 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983939191Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983939192 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983939192Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983939192Cleaning up temporary data from old regions at 1730983939205 (+13 ms)Region opened successfully at 1730983939211 (+6 ms) 2024-11-07T12:52:19,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:52:19,211 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:52:19,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:52:19,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:52:19,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:52:19,211 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(746): ClusterId : c5ff3d34-b209-493c-8806-713447c6ab20 2024-11-07T12:52:19,211 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:52:19,212 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-07T12:52:19,212 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730983939211Disabling compacts and flushes for region at 1730983939211Disabling writes for close at 1730983939211Writing region close event to WAL at 1730983939212 (+1 ms)Closed at 1730983939212 2024-11-07T12:52:19,214 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:52:19,214 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:52:19,214 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:52:19,214 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:52:19,214 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:52:19,216 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:52:19,217 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:52:19,217 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:52:19,218 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@aa1ed50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:52:19,236 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db9ad1cb6cf9:42011 2024-11-07T12:52:19,237 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:52:19,237 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:52:19,237 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(832): About to register with Master. 
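Each "Opened ...; next sequenceid=2; SteppingSplitPolicy ... ConstantSizeRegionSplitPolicy{desiredMaxFileSize=..., jitterRate=...}" entry reports the jittered split threshold the region will use. The three values in this run (720834 with jitterRate=-0.0834..., 794060 with 0.00969..., and 800848 with 0.01833... later in the log) are all consistent with a base split size of 786432 bytes plus base × jitterRate truncated to a long; the 786432 base is an inference from those numbers (presumably a deliberately small hbase.hregion.max.filesize in this test), not a value printed in the log:

    public class SplitJitterSketch {
        // desiredMaxFileSize = base + (long) (base * jitterRate); the cast truncates toward zero
        static long desiredMaxFileSize(long base, double jitterRate) {
            return base + (long) (base * jitterRate);
        }

        public static void main(String[] args) {
            long base = 786432L;  // assumed configured split size for this run
            System.out.println(desiredMaxFileSize(base, -0.0834130346775055));   // 720834 (master store region)
            System.out.println(desiredMaxFileSize(base, 0.009699612855911255));  // 794060 (hbase:meta, bootstrap open)
            System.out.println(desiredMaxFileSize(base, 0.018332049250602722));  // 800848 (hbase:meta, open on the regionserver)
        }
    }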
2024-11-07T12:52:19,238 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(2659): reportForDuty to master=db9ad1cb6cf9,42273,1730983938946 with port=42011, startcode=1730983938993 2024-11-07T12:52:19,238 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:52:19,240 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56347, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:52:19,241 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42273 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:19,241 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42273 {}] master.ServerManager(517): Registering regionserver=db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:19,243 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201 2024-11-07T12:52:19,243 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37737 2024-11-07T12:52:19,243 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:52:19,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:52:19,245 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] zookeeper.ZKUtil(111): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:19,245 WARN [RS:0;db9ad1cb6cf9:42011 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:52:19,245 INFO [RS:0;db9ad1cb6cf9:42011 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:52:19,246 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:19,248 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9ad1cb6cf9,42011,1730983938993] 2024-11-07T12:52:19,251 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:52:19,253 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:52:19,254 INFO [RS:0;db9ad1cb6cf9:42011 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:52:19,254 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
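The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false) fits the usual sizing rule: a fraction of the heap for the global memstore limit and 95% of that limit for the low-water mark. With the stock fractions (0.4 of heap for hbase.regionserver.global.memstore.size and 0.95 for the lower limit, both assumed here rather than shown in the log), the logged values imply a heap of roughly 2.2 GB for this test JVM:

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            double heapMb = 2200.0;           // inferred from the logged 880 M limit, not printed in the log
            double globalFraction = 0.4;      // assumed default hbase.regionserver.global.memstore.size
            double lowMarkFraction = 0.95;    // assumed default lower-limit fraction

            double globalLimitMb = heapMb * globalFraction;      // ~880 M, as logged
            double lowMarkMb = globalLimitMb * lowMarkFraction;  // ~836 M, as logged
            System.out.printf("limit=%.0fM lowMark=%.0fM%n", globalLimitMb, lowMarkMb);
        }
    }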
2024-11-07T12:52:19,254 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:52:19,255 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:52:19,255 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,255 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,255 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:19,256 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:52:19,257 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:52:19,257 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
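The run of "Starting executor service name=RS_..., corePoolSize=N, maxPoolSize=N" entries describes one fixed-size worker pool per region-server event type, with core and max pool sizes equal. The sketch below shows that general shape with the plain JDK ThreadPoolExecutor; it illustrates the pattern only and is not HBase's internal executor.ExecutorService implementation:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class EventPoolSketch {
        // e.g. RS_OPEN_REGION uses 1/1 and RS_SNAPSHOT_OPERATIONS uses 3/3 in the log
        static ThreadPoolExecutor newEventPool(String name, int size) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                size, size,                       // corePoolSize == maxPoolSize, as logged
                60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                r -> new Thread(r, name));
            pool.allowCoreThreadTimeOut(true);    // let idle workers exit between bursts
            return pool;
        }

        public static void main(String[] args) {
            ThreadPoolExecutor openRegion = newEventPool("RS_OPEN_REGION", 1);
            openRegion.execute(() -> System.out.println("open-region task"));
            openRegion.shutdown();
        }
    }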
2024-11-07T12:52:19,257 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,257 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,257 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,257 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,257 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,42011,1730983938993-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:52:19,275 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:52:19,275 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,42011,1730983938993-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,275 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,275 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.Replication(171): db9ad1cb6cf9,42011,1730983938993 started 2024-11-07T12:52:19,291 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,291 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(1482): Serving as db9ad1cb6cf9,42011,1730983938993, RpcServer on db9ad1cb6cf9/172.17.0.2:42011, sessionid=0x1001a4cd6100001 2024-11-07T12:52:19,291 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:52:19,291 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:19,291 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,42011,1730983938993' 2024-11-07T12:52:19,291 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:52:19,292 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:52:19,293 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:52:19,293 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:52:19,293 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:19,293 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,42011,1730983938993' 2024-11-07T12:52:19,293 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:52:19,293 DEBUG 
[RS:0;db9ad1cb6cf9:42011 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:52:19,294 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:52:19,294 INFO [RS:0;db9ad1cb6cf9:42011 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:52:19,294 INFO [RS:0;db9ad1cb6cf9:42011 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:52:19,368 WARN [db9ad1cb6cf9:42273 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-07T12:52:19,396 INFO [RS:0;db9ad1cb6cf9:42011 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C42011%2C1730983938993, suffix=, logDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993, archiveDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs, maxLogs=32 2024-11-07T12:52:19,398 INFO [RS:0;db9ad1cb6cf9:42011 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 2024-11-07T12:52:19,406 INFO [RS:0;db9ad1cb6cf9:42011 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 2024-11-07T12:52:19,410 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38363:38363),(127.0.0.1/127.0.0.1:46465:46465)] 2024-11-07T12:52:19,618 DEBUG [db9ad1cb6cf9:42273 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:52:19,619 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:19,620 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,42011,1730983938993, state=OPENING 2024-11-07T12:52:19,622 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:52:19,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:19,624 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:52:19,624 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:52:19,624 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:52:19,624 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,42011,1730983938993}] 2024-11-07T12:52:19,778 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:52:19,780 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48805, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:52:19,783 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:52:19,784 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:52:19,786 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C42011%2C1730983938993.meta, suffix=.meta, logDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993, archiveDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs, maxLogs=32 2024-11-07T12:52:19,787 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta 2024-11-07T12:52:19,792 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta 2024-11-07T12:52:19,800 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46465:46465),(127.0.0.1/127.0.0.1:38363:38363)] 2024-11-07T12:52:19,805 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:52:19,805 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:52:19,805 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:52:19,805 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
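The meta descriptor earlier in the log carries coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', and the entries just above show the region server loading that endpoint from the HTD when it opens hbase:meta. For an ordinary user table, attaching a coprocessor by class name looks roughly like this HBase 2.x client-side sketch (the table name is made up; this illustrates the public builder API, not the internal code path that builds the meta descriptor):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorSketch {
        public static void main(String[] args) throws Exception {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))             // hypothetical table
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                // same endpoint class the log shows on hbase:meta
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
            System.out.println(td);
        }
    }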
2024-11-07T12:52:19,805 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:52:19,805 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:19,805 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:52:19,806 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:52:19,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:52:19,809 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:52:19,809 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:19,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:52:19,810 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:52:19,810 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:19,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:52:19,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:52:19,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:52:19,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:52:19,813 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:52:19,813 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:19,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
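The StoreOpener/CompactionConfiguration entries above enumerate the per-family settings of hbase:meta's stores, e.g. the 'info' family with ROW_INDEX_V1 data-block encoding, a ROWCOL bloom filter, in-memory caching, 3 versions, and 8 KB blocks. A family with the same shape would be declared client-side roughly as below; this mirrors the properties printed in the log using the HBase 2.x builder API and is not the code that actually creates the meta descriptor:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
        public static void main(String[] args) {
            // NAME => 'info', VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1',
            // BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => '8192'
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            System.out.println(info);
        }
    }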
2024-11-07T12:52:19,814 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:52:19,814 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740 2024-11-07T12:52:19,816 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740 2024-11-07T12:52:19,817 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:52:19,817 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:52:19,818 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:52:19,819 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:52:19,820 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800848, jitterRate=0.018332049250602722}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:52:19,820 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:52:19,821 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730983939806Writing region info on filesystem at 1730983939806Initializing all the Stores at 1730983939807 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983939807Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983939807Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983939807Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983939807Cleaning up temporary data from old regions at 1730983939817 (+10 ms)Running coprocessor post-open hooks at 1730983939820 (+3 ms)Region opened successfully at 1730983939821 (+1 ms) 2024-11-07T12:52:19,822 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730983939777 2024-11-07T12:52:19,825 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:52:19,825 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:52:19,826 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:19,827 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,42011,1730983938993, state=OPEN 2024-11-07T12:52:19,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:52:19,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:52:19,831 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:19,831 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:52:19,831 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:52:19,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:52:19,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,42011,1730983938993 in 207 msec 2024-11-07T12:52:19,838 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:52:19,838 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 620 msec 2024-11-07T12:52:19,839 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:52:19,839 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:52:19,840 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:52:19,841 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,42011,1730983938993, seqNum=-1] 2024-11-07T12:52:19,841 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:52:19,842 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58463, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:52:19,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 686 msec 2024-11-07T12:52:19,848 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730983939848, completionTime=-1 2024-11-07T12:52:19,848 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:52:19,848 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-07T12:52:19,850 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-07T12:52:19,850 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730983999850 2024-11-07T12:52:19,850 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730984059850 2024-11-07T12:52:19,850 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-07T12:52:19,851 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,42273,1730983938946-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,851 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,42273,1730983938946-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,851 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,42273,1730983938946-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,851 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db9ad1cb6cf9:42273, period=300000, unit=MILLISECONDS is enabled. 
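InitMetaProcedure creates the built-in 'default' and 'hbase' namespaces internally; the client-side analogue goes through the Admin API. A minimal sketch, assuming a reachable cluster and a made-up namespace name ('example_ns'):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Creates a user namespace; the built-ins above are created by the master itself.
                admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
            }
        }
    }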
2024-11-07T12:52:19,851 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,851 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:19,853 DEBUG [master/db9ad1cb6cf9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:52:19,855 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.827sec 2024-11-07T12:52:19,855 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:52:19,855 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:52:19,855 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:52:19,855 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:52:19,855 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:52:19,855 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,42273,1730983938946-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:52:19,855 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,42273,1730983938946-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:52:19,858 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:52:19,858 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:52:19,858 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,42273,1730983938946-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
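Each "Chore ScheduledChore name=..., period=..., unit=... is enabled." line above is emitted when a chore is handed to the ChoreService. A minimal sketch of that pattern, using only the public ScheduledChore/ChoreService/Stoppable types (the chore name and period here are invented):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            ChoreService service = new ChoreService("sketch");             // thread pool that runs chores
            ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
                @Override protected void chore() {
                    System.out.println("DemoChore ran");                   // periodic work goes here
                }
            };
            service.scheduleChore(chore);   // logs the "is enabled." line, as seen above
            Thread.sleep(3000);
            stopper.stop("done");
            service.shutdown();
        }
    }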
2024-11-07T12:52:19,912 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53c8d161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:52:19,912 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db9ad1cb6cf9,42273,-1 for getting cluster id 2024-11-07T12:52:19,912 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:52:19,914 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c5ff3d34-b209-493c-8806-713447c6ab20' 2024-11-07T12:52:19,914 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:52:19,914 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c5ff3d34-b209-493c-8806-713447c6ab20" 2024-11-07T12:52:19,915 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65e8844b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:52:19,915 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db9ad1cb6cf9,42273,-1] 2024-11-07T12:52:19,915 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:52:19,915 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:19,917 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34264, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:52:19,918 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cae84f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:52:19,918 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:52:19,919 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,42011,1730983938993, seqNum=-1] 2024-11-07T12:52:19,919 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:52:19,921 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33554, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:52:19,923 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db9ad1cb6cf9,42273,1730983938946 2024-11-07T12:52:19,923 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:19,926 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:52:19,942 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:52:19,942 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:19,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:19,943 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:52:19,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:52:19,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:52:19,943 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:52:19,943 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:52:19,943 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43605 2024-11-07T12:52:19,945 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43605 connecting to ZooKeeper ensemble=127.0.0.1:58729 2024-11-07T12:52:19,945 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:19,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:52:19,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:436050x0, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:52:19,952 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43605-0x1001a4cd6100002 connected 2024-11-07T12:52:19,953 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-07T12:52:19,953 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-07T12:52:19,953 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:52:19,954 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-07T12:52:19,954 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:52:19,956 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:52:19,956 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43605 2024-11-07T12:52:19,956 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43605 2024-11-07T12:52:19,957 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43605 2024-11-07T12:52:19,957 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43605 2024-11-07T12:52:19,958 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43605 2024-11-07T12:52:19,960 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(746): ClusterId : c5ff3d34-b209-493c-8806-713447c6ab20 2024-11-07T12:52:19,960 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:52:19,962 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:52:19,962 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:52:19,964 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:52:19,964 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34e94bff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:52:19,977 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;db9ad1cb6cf9:43605 2024-11-07T12:52:19,977 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:52:19,977 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:52:19,977 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-07T12:52:19,978 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(2659): reportForDuty to master=db9ad1cb6cf9,42273,1730983938946 with port=43605, startcode=1730983939942 2024-11-07T12:52:19,978 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:52:19,979 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53939, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:52:19,980 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42273 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db9ad1cb6cf9,43605,1730983939942 2024-11-07T12:52:19,980 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42273 {}] master.ServerManager(517): Registering regionserver=db9ad1cb6cf9,43605,1730983939942 2024-11-07T12:52:19,982 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201 2024-11-07T12:52:19,982 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37737 2024-11-07T12:52:19,982 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:52:19,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:52:19,984 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] zookeeper.ZKUtil(111): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9ad1cb6cf9,43605,1730983939942 2024-11-07T12:52:19,984 WARN [RS:1;db9ad1cb6cf9:43605 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T12:52:19,984 INFO [RS:1;db9ad1cb6cf9:43605 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:52:19,984 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9ad1cb6cf9,43605,1730983939942] 2024-11-07T12:52:19,984 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942 2024-11-07T12:52:19,988 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:52:20,015 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-07T12:52:20,016 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:52:20,017 INFO [RS:1;db9ad1cb6cf9:43605 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:52:20,017 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:20,017 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:52:20,018 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:52:20,018 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
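The WALProvider instantiated above (FSHLogProvider) is selected through configuration. A small sketch, assuming the stock hbase.wal.provider key and its usual values rather than anything read from this test's site file:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // "filesystem" maps to FSHLogProvider (the provider in the log above);
            // "asyncfs" selects the async WAL and "multiwal" a grouped provider.
            conf.set("hbase.wal.provider", "filesystem");
            System.out.println("wal provider = " + conf.get("hbase.wal.provider"));
        }
    }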
2024-11-07T12:52:20,018 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,018 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:52:20,019 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:52:20,019 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:20,019 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:20,020 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:20,020 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-07T12:52:20,020 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:20,020 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43605,1730983939942-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:52:20,036 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:52:20,036 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43605,1730983939942-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:20,036 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:20,036 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.Replication(171): db9ad1cb6cf9,43605,1730983939942 started 2024-11-07T12:52:20,049 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:52:20,050 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(1482): Serving as db9ad1cb6cf9,43605,1730983939942, RpcServer on db9ad1cb6cf9/172.17.0.2:43605, sessionid=0x1001a4cd6100002 2024-11-07T12:52:20,050 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:52:20,050 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9ad1cb6cf9,43605,1730983939942 2024-11-07T12:52:20,050 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,43605,1730983939942' 2024-11-07T12:52:20,050 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:52:20,050 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;db9ad1cb6cf9:43605,5,FailOnTimeoutGroup] 2024-11-07T12:52:20,050 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-07T12:52:20,050 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:52:20,051 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:52:20,051 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:52:20,051 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:52:20,051 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9ad1cb6cf9,43605,1730983939942 2024-11-07T12:52:20,051 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,43605,1730983939942' 2024-11-07T12:52:20,051 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:52:20,052 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is db9ad1cb6cf9,42273,1730983938946 
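The "Started new server=Thread[RS:1;...]" line above is the test adding a second region server to the already running mini-cluster. A rough sketch of that pattern, written against HBase 2.x-style test-utility names (HBaseTestingUtility/MiniHBaseCluster; the 4.0 snapshot in this log renames the former to HBaseTestingUtil), not the actual TestLogRolling code:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.MiniHBaseCluster;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            util.startMiniCluster(1);                    // master plus one region server
            MiniHBaseCluster cluster = util.getMiniHBaseCluster();
            cluster.startRegionServer();                 // adds RS:1, like the line above
            // ... exercise the cluster through util.getConnection() ...
            util.shutdownMiniCluster();
        }
    }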
2024-11-07T12:52:20,052 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:52:20,052 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1459431e 2024-11-07T12:52:20,052 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:52:20,052 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:52:20,052 INFO [RS:1;db9ad1cb6cf9:43605 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:52:20,052 INFO [RS:1;db9ad1cb6cf9:43605 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:52:20,054 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34280, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:52:20,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42273 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:52:20,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42273 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-07T12:52:20,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42273 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:52:20,056 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:20,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42273 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-07T12:52:20,059 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:52:20,060 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:20,060 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42273 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-07T12:52:20,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:20,061 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:52:20,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:52:20,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39857 is added to blk_1073741835_1011 (size=393) 2024-11-07T12:52:20,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34709 is added to blk_1073741835_1011 (size=393) 2024-11-07T12:52:20,072 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 68427f795afd4a50d7e5b045fbc4e8bb, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201 2024-11-07T12:52:20,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34709 is added to blk_1073741836_1012 (size=76) 2024-11-07T12:52:20,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39857 is added to blk_1073741836_1012 (size=76) 2024-11-07T12:52:20,080 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:20,080 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 68427f795afd4a50d7e5b045fbc4e8bb, disabling compactions & flushes 2024-11-07T12:52:20,080 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:20,080 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:20,080 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. after waiting 0 ms 2024-11-07T12:52:20,080 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:20,080 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:20,080 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 68427f795afd4a50d7e5b045fbc4e8bb: Waiting for close lock at 1730983940080Disabling compacts and flushes for region at 1730983940080Disabling writes for close at 1730983940080Writing region close event to WAL at 1730983940080Closed at 1730983940080 2024-11-07T12:52:20,082 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:52:20,083 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1730983940082"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730983940082"}]},"ts":"1730983940082"} 2024-11-07T12:52:20,085 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
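The CreateTableProcedure driven above corresponds to a client call that builds a descriptor with a single 'info' family (one version, ROW bloom filter) and hands it to Admin.createTable. A sketch against the standard builder API, assuming a reachable cluster; this is not the test's own code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
                table.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)                  // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                    .build());
                admin.createTable(table.build());       // drives the CreateTableProcedure seen above
            }
        }
    }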
2024-11-07T12:52:20,087 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:52:20,087 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730983940087"}]},"ts":"1730983940087"} 2024-11-07T12:52:20,089 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-07T12:52:20,090 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=68427f795afd4a50d7e5b045fbc4e8bb, ASSIGN}] 2024-11-07T12:52:20,091 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=68427f795afd4a50d7e5b045fbc4e8bb, ASSIGN 2024-11-07T12:52:20,093 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=68427f795afd4a50d7e5b045fbc4e8bb, ASSIGN; state=OFFLINE, location=db9ad1cb6cf9,42011,1730983938993; forceNewPlan=false, retain=false 2024-11-07T12:52:20,155 INFO [RS:1;db9ad1cb6cf9:43605 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C43605%2C1730983939942, suffix=, logDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942, archiveDir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs, maxLogs=32 2024-11-07T12:52:20,156 INFO [RS:1;db9ad1cb6cf9:43605 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 2024-11-07T12:52:20,162 INFO [RS:1;db9ad1cb6cf9:43605 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 2024-11-07T12:52:20,163 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46465:46465),(127.0.0.1/127.0.0.1:38363:38363)] 2024-11-07T12:52:20,243 INFO [db9ad1cb6cf9:42273 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
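The "WAL configuration: blocksize=256 MB, rollsize=128 MB" line above reflects the rule that a WAL file is rolled once it reaches the block size times a roll multiplier. A sketch of that arithmetic, assuming the stock key names and using the logged values as fallbacks rather than reading this run's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSizeSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            long rollSize = (long) (blockSize * multiplier);   // 256 MB * 0.5 = 128 MB, as in the log
            System.out.println("WAL roll size = " + rollSize + " bytes");
        }
    }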
2024-11-07T12:52:20,244 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=68427f795afd4a50d7e5b045fbc4e8bb, regionState=OPENING, regionLocation=db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:20,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=68427f795afd4a50d7e5b045fbc4e8bb, ASSIGN because future has completed 2024-11-07T12:52:20,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68427f795afd4a50d7e5b045fbc4e8bb, server=db9ad1cb6cf9,42011,1730983938993}] 2024-11-07T12:52:20,404 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:20,405 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 68427f795afd4a50d7e5b045fbc4e8bb, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:52:20,405 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,405 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:52:20,406 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,406 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,407 INFO [StoreOpener-68427f795afd4a50d7e5b045fbc4e8bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,409 INFO [StoreOpener-68427f795afd4a50d7e5b045fbc4e8bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 68427f795afd4a50d7e5b045fbc4e8bb columnFamilyName info 2024-11-07T12:52:20,409 DEBUG [StoreOpener-68427f795afd4a50d7e5b045fbc4e8bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:52:20,409 INFO [StoreOpener-68427f795afd4a50d7e5b045fbc4e8bb-1 {}] regionserver.HStore(327): Store=68427f795afd4a50d7e5b045fbc4e8bb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:52:20,409 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,410 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,410 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,411 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,411 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,412 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,414 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:52:20,415 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 68427f795afd4a50d7e5b045fbc4e8bb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688491, jitterRate=-0.12453879415988922}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:52:20,415 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:20,415 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 68427f795afd4a50d7e5b045fbc4e8bb: Running coprocessor pre-open hook at 1730983940406Writing region info on filesystem at 1730983940406Initializing all the Stores at 1730983940407 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983940407Cleaning up temporary data from old regions at 1730983940411 (+4 ms)Running coprocessor post-open hooks at 1730983940415 (+4 ms)Region opened successfully at 1730983940415 2024-11-07T12:52:20,417 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb., pid=6, masterSystemTime=1730983940400 2024-11-07T12:52:20,419 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:20,419 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:20,420 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=68427f795afd4a50d7e5b045fbc4e8bb, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:20,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68427f795afd4a50d7e5b045fbc4e8bb, server=db9ad1cb6cf9,42011,1730983938993 because future has completed 2024-11-07T12:52:20,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:52:20,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 68427f795afd4a50d7e5b045fbc4e8bb, server=db9ad1cb6cf9,42011,1730983938993 in 177 msec 2024-11-07T12:52:20,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:52:20,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=68427f795afd4a50d7e5b045fbc4e8bb, ASSIGN in 337 msec 2024-11-07T12:52:20,431 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:52:20,431 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730983940431"}]},"ts":"1730983940431"} 2024-11-07T12:52:20,433 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-07T12:52:20,434 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:52:20,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 378 msec 2024-11-07T12:52:20,575 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:52:20,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:20,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:20,595 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:20,595 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:25,518 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:52:25,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:25,536 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:25,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:25,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:52:25,544 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T12:52:25,544 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-07T12:52:29,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-07T12:52:30,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-07T12:52:30,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:52:30,083 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-07T12:52:30,083 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-07T12:52:30,086 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-07T12:52:30,086 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:30,099 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:30,102 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:30,103 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:30,103 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:30,103 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:52:30,103 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ddd8f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:30,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d0f4a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:30,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@730725ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/java.io.tmpdir/jetty-localhost-39011-hadoop-hdfs-3_4_1-tests_jar-_-any-17606070842605921076/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:30,221 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20e1b523{HTTP/1.1, (http/1.1)}{localhost:39011} 2024-11-07T12:52:30,221 INFO [Time-limited test {}] server.Server(415): Started @115490ms 2024-11-07T12:52:30,222 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:52:30,258 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:30,261 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:30,264 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:30,264 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:30,264 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:52:30,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@83c4e47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:30,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@519de6b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:30,306 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:30,306 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:30,324 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:52:30,327 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x527d22c7a55f5216 with lease ID 0x780a2b110f549ce5: Processing first storage report for DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6 from datanode DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:30,327 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x527d22c7a55f5216 with lease ID 0x780a2b110f549ce5: from storage DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6 node DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:30,327 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x527d22c7a55f5216 with lease ID 0x780a2b110f549ce5: Processing first storage report for DS-f957e689-c90d-41ec-b72b-fee6cdfe7ce9 from datanode DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:30,328 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x527d22c7a55f5216 with lease ID 0x780a2b110f549ce5: from storage DS-f957e689-c90d-41ec-b72b-fee6cdfe7ce9 node DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:30,382 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4438143d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/java.io.tmpdir/jetty-localhost-39419-hadoop-hdfs-3_4_1-tests_jar-_-any-6273702413177223939/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:30,382 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32dac098{HTTP/1.1, (http/1.1)}{localhost:39419} 2024-11-07T12:52:30,382 INFO [Time-limited test {}] server.Server(415): Started @115651ms 2024-11-07T12:52:30,383 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:52:30,417 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:30,421 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:30,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:30,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:30,422 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:52:30,423 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc0bdb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:30,423 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ff2063{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:30,463 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data7/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:30,463 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data8/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:30,480 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:52:30,483 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7867292be563a044 with lease ID 0x780a2b110f549ce6: Processing first storage report for DS-1aa386ef-ea55-4893-8b64-bae689b22210 from datanode DatanodeRegistration(127.0.0.1:44697, datanodeUuid=5fd60497-e5e1-4dd7-a515-1228a8c61184, infoPort=34499, infoSecurePort=0, ipcPort=40327, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:30,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7867292be563a044 with lease ID 0x780a2b110f549ce6: from storage DS-1aa386ef-ea55-4893-8b64-bae689b22210 node DatanodeRegistration(127.0.0.1:44697, datanodeUuid=5fd60497-e5e1-4dd7-a515-1228a8c61184, infoPort=34499, infoSecurePort=0, ipcPort=40327, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:30,483 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7867292be563a044 with lease ID 0x780a2b110f549ce6: Processing first storage report for DS-9f378088-7867-4776-ab0a-689c463961f4 from datanode DatanodeRegistration(127.0.0.1:44697, datanodeUuid=5fd60497-e5e1-4dd7-a515-1228a8c61184, infoPort=34499, infoSecurePort=0, ipcPort=40327, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:30,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7867292be563a044 with lease ID 0x780a2b110f549ce6: from storage DS-9f378088-7867-4776-ab0a-689c463961f4 node DatanodeRegistration(127.0.0.1:44697, datanodeUuid=5fd60497-e5e1-4dd7-a515-1228a8c61184, infoPort=34499, infoSecurePort=0, ipcPort=40327, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:30,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c81b75d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/java.io.tmpdir/jetty-localhost-46841-hadoop-hdfs-3_4_1-tests_jar-_-any-14506871610649939391/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:30,544 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f55aa3b{HTTP/1.1, (http/1.1)}{localhost:46841} 2024-11-07T12:52:30,544 INFO [Time-limited test {}] server.Server(415): Started @115813ms 2024-11-07T12:52:30,545 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
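The datanode-startup warnings above are configuration noise rather than test failures: the metrics system finds neither hadoop-metrics2-datanode.properties nor hadoop-metrics2.properties on the classpath, and dfs.datanode.directoryscan.throttle.limit.ms.per.sec is set above 1000 ms/sec, so the DirectoryScanner falls back to -1 (throttling disabled), exactly as the message says. A minimal sketch of a test-side Configuration tweak that avoids the scanner warning; the chosen value of 500 ms/sec and the class name are illustrative assumptions, not taken from this run (the metrics warning would likewise go away if a hadoop-metrics2.properties file were placed on the test classpath).

```java
import org.apache.hadoop.conf.Configuration;

/**
 * Hypothetical helper: keep the directory-scanner throttle inside its accepted
 * range so the "set to value above 1000 ms/sec" warning above is not emitted.
 */
public final class DataNodeWarningTuning {
  public static Configuration quieterDataNodeConf() {
    Configuration conf = new Configuration();
    // 500 ms/sec is an arbitrary in-range example; per the log, values above
    // 1000 ms/sec are rejected and throttling silently falls back to -1.
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
    return conf;
  }
}
```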
2024-11-07T12:52:30,626 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data9/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:30,626 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data10/current/BP-551199049-172.17.0.2-1730983938264/current, will proceed with Du for space computation calculation, 2024-11-07T12:52:30,642 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:52:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe430fdbf6447053 with lease ID 0x780a2b110f549ce7: Processing first storage report for DS-f579e5a1-55af-4789-8bb7-a3693c165127 from datanode DatanodeRegistration(127.0.0.1:41001, datanodeUuid=c56183c4-a159-4ce2-a559-2d72997be584, infoPort=43131, infoSecurePort=0, ipcPort=44725, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe430fdbf6447053 with lease ID 0x780a2b110f549ce7: from storage DS-f579e5a1-55af-4789-8bb7-a3693c165127 node DatanodeRegistration(127.0.0.1:41001, datanodeUuid=c56183c4-a159-4ce2-a559-2d72997be584, infoPort=43131, infoSecurePort=0, ipcPort=44725, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe430fdbf6447053 with lease ID 0x780a2b110f549ce7: Processing first storage report for DS-72572287-a77c-479a-af58-83e5d2f26f83 from datanode DatanodeRegistration(127.0.0.1:41001, datanodeUuid=c56183c4-a159-4ce2-a559-2d72997be584, infoPort=43131, infoSecurePort=0, ipcPort=44725, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264) 2024-11-07T12:52:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe430fdbf6447053 with lease ID 0x780a2b110f549ce7: from storage DS-72572287-a77c-479a-af58-83e5d2f26f83 node DatanodeRegistration(127.0.0.1:41001, datanodeUuid=c56183c4-a159-4ce2-a559-2d72997be584, infoPort=43131, infoSecurePort=0, ipcPort=44725, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:52:30,665 WARN [ResponseProcessor for block BP-551199049-172.17.0.2-1730983938264:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-551199049-172.17.0.2-1730983938264:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:30,665 WARN [ResponseProcessor for block BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:30,666 WARN [ResponseProcessor for block BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:30,666 WARN [ResponseProcessor for block BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:30,666 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 block BP-551199049-172.17.0.2-1730983938264:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:30,666 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 block BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 
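The EOFException and "Bad response ERROR" entries above are the DFSClient's view of a write pipeline losing a member mid-stream: the ResponseProcessor stops receiving acks from 127.0.0.1:34709, and DataStreamer marks that datanode bad and starts error recovery on the affected WAL blocks. In a mini-cluster test this is typically produced by stopping a datanode while the WAL streams are still open; a rough sketch of that pattern with MiniDFSCluster follows (the method name, the variable names, and the choice to stop datanode index 0 are assumptions for illustration, not details read from this log).

```java
import org.apache.hadoop.hdfs.MiniDFSCluster;

/** Sketch only: stop one datanode under open writers, then restore it. */
public final class DatanodeDeathSketch {
  public static void killAndRestoreFirstDataNode(MiniDFSCluster dfsCluster) throws Exception {
    // Stopping a datanode while WAL streams are open makes their pipelines
    // report that node as bad, as in the "Error Recovery ... is bad" entries above.
    MiniDFSCluster.DataNodeProperties stopped = dfsCluster.stopDataNode(0);

    // ... assertions about log rolling / pipeline recovery would go here ...

    dfsCluster.restartDataNode(stopped);  // bring it back for a clean teardown
    dfsCluster.waitActive();
  }
}
```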
2024-11-07T12:52:30,666 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta block BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:30,666 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 block BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:30,666 WARN [PacketResponder: BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34709] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
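How aggressively the client rebuilds a damaged pipeline during recoveries like the ones above, by recruiting a replacement datanode or simply continuing with the survivors, is governed by the dfs.client.block.write.replace-datanode-on-failure.* client settings. A small sketch of setting them on a client Configuration; the particular policy and best-effort choice shown here are examples only, not what this test run used.

```java
import org.apache.hadoop.conf.Configuration;

/** Sketch only: client-side knobs for pipeline recovery on datanode failure. */
public final class PipelineRecoveryPolicy {
  public static Configuration lenientPipelineRecovery() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only looks for a replacement in the larger-pipeline cases;
    // ALWAYS and NEVER are the other accepted values.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Best-effort: if no replacement datanode is available, keep writing with the survivors
    // instead of failing the stream outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```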
2024-11-07T12:52:30,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226118773_22 at /127.0.0.1:33244 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33244 dst: /127.0.0.1:34709 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:30,667 WARN [PacketResponder: BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34709] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:30,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:51038 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51038 dst: /127.0.0.1:39857 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:30,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1226118773_22 at /127.0.0.1:51084 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51084 dst: /127.0.0.1:39857 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:30,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1974465270_22 at /127.0.0.1:51004 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51004 dst: /127.0.0.1:39857 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:30,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:33222 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33222 dst: /127.0.0.1:34709 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:30,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1974465270_22 at /127.0.0.1:33196 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33196 dst: /127.0.0.1:34709 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:30,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:51026 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51026 dst: /127.0.0.1:39857 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:52:30,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f2859b3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:30,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:33220 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33220 dst: /127.0.0.1:34709 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:52:30,669 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b918d2a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:52:30,669 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:52:30,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eee535{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:52:30,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66182b08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,STOPPED} 2024-11-07T12:52:30,672 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:52:30,672 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-07T12:52:30,672 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-551199049-172.17.0.2-1730983938264 (Datanode Uuid 732f757f-6769-4bb3-bc12-fa7b328c287d) service to localhost/127.0.0.1:37737 2024-11-07T12:52:30,672 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:52:30,672 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data3/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:30,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data4/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:30,673 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:52:30,674 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 block BP-551199049-172.17.0.2-1730983938264:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:30,674 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 block BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:30,674 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 block BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:30,674 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta block BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:30,675 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de86657{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:30,676 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6787773a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:52:30,676 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:52:30,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a107105{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:52:30,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65dec1b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,STOPPED} 2024-11-07T12:52:30,677 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:52:30,677 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
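With both datanodes from the original pipeline now shut down, the entries that follow show the WAL writer failing with "All datanodes ... are bad. Aborting...", the log roller opening a new WAL on the surviving datanodes, and RecoverLeaseFSUtils asking the NameNode to recover the lease on the abandoned WAL file ("Failed to recover lease, attempt=0 ... after 4ms" indicates it retries). That step is essentially a retry loop around DistributedFileSystem.recoverLease; a condensed sketch of such a loop is below, where the method name, timeout, and sleep interval are illustrative assumptions rather than the actual RecoverLeaseFSUtils implementation.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Sketch only: retry lease recovery on an abandoned WAL until it is closed. */
public final class LeaseRecoverySketch {
  public static boolean recoverWalLease(DistributedFileSystem fs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the NameNode has closed the file; while
      // recovery is still pending it logs "Lease recovery is in progress", as seen below.
      if (fs.recoverLease(wal)) {
        return true;            // lease released, file closed, safe to split/replay
      }
      Thread.sleep(1000);       // illustrative back-off between attempts
    }
    return false;
  }
}
```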
2024-11-07T12:52:30,677 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-551199049-172.17.0.2-1730983938264 (Datanode Uuid 39460e06-c321-4be7-b8ee-764203916adf) service to localhost/127.0.0.1:37737 2024-11-07T12:52:30,677 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:52:30,678 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data1/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:30,678 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data2/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:52:30,678 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:52:30,682 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb., hostname=db9ad1cb6cf9,42011,1730983938993, seqNum=2] 2024-11-07T12:52:30,683 ERROR [FSHLog-0-hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201-prefix:db9ad1cb6cf9,42011,1730983938993 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:30,683 WARN [FSHLog-0-hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201-prefix:db9ad1cb6cf9,42011,1730983938993 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:30,684 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:30,684 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C42011%2C1730983938993:(num 1730983939397) roll requested
2024-11-07T12:52:30,684 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42011%2C1730983938993.1730983950684
2024-11-07T12:52:30,689 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:30,690 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:30,690 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:30,690 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:30,690 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:30,690 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983950684
2024-11-07T12:52:30,690 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:30,690 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:30,691 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34499:34499),(127.0.0.1/127.0.0.1:43131:43131)]
2024-11-07T12:52:30,691 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 is not closed yet, will try archiving it next time
2024-11-07T12:52:30,691 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-11-07T12:52:30,692 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-11-07T12:52:30,692 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397
2024-11-07T12:52:30,694 WARN [IPC Server handler 3 on default port 37737 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009
2024-11-07T12:52:30,698 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 after 4ms
2024-11-07T12:52:31,067 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:32,020 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:32,691 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:32,692 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983950684
2024-11-07T12:52:32,693 WARN [ResponseProcessor for block BP-551199049-172.17.0.2-1730983938264:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-551199049-172.17.0.2-1730983938264:blk_1073741838_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:32,693 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983950684 block BP-551199049-172.17.0.2-1730983938264:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad.
2024-11-07T12:52:32,693 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:52776 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44697:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52776 dst: /127.0.0.1:44697
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:52:32,694 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:50628 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:41001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50628 dst: /127.0.0.1:41001
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:52:32,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4438143d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:52:32,695 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32dac098{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:52:32,695 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:52:32,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@519de6b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:52:32,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@83c4e47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,STOPPED}
2024-11-07T12:52:32,697 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:52:32,697 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:52:32,697 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-551199049-172.17.0.2-1730983938264 (Datanode Uuid 5fd60497-e5e1-4dd7-a515-1228a8c61184) service to localhost/127.0.0.1:37737
2024-11-07T12:52:32,697 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:52:32,697 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data7/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:52:32,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data8/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:52:32,698 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:52:33,067 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:34,020 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:34,691 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:34,692 WARN [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]
2024-11-07T12:52:34,692 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C42011%2C1730983938993:(num 1730983950684) roll requested
2024-11-07T12:52:34,692 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42011%2C1730983938993.1730983954692
2024-11-07T12:52:34,695 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:34,696 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad.
2024-11-07T12:52:34,696 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741839_1021
2024-11-07T12:52:34,698 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]
2024-11-07T12:52:34,698 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 after 4006ms
2024-11-07T12:52:34,702 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-11-07T12:52:34,705 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:34,705 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:34,705 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:34,705 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:34,705 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:52:34,706 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983950684 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983954692
2024-11-07T12:52:34,706 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43131:43131),(127.0.0.1/127.0.0.1:40103:40103)]
2024-11-07T12:52:34,707 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 is not closed yet, will try archiving it next time
2024-11-07T12:52:34,707 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983950684 is not closed yet, will try archiving it next time
2024-11-07T12:52:34,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41001 is added to blk_1073741838_1020 (size=3600)
2024-11-07T12:52:34,708 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 is not closed yet, will try archiving it next time
2024-11-07T12:52:35,068 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:36,021 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:36,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741838_1020 (size=3600)
2024-11-07T12:52:36,707 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:36,707 WARN [ResponseProcessor for block BP-551199049-172.17.0.2-1730983938264:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-551199049-172.17.0.2-1730983938264:blk_1073741840_1022
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:36,708 WARN [DataStreamer for file /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983954692 block BP-551199049-172.17.0.2-1730983938264:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad.
2024-11-07T12:52:36,708 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:50636 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41001:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50636 dst: /127.0.0.1:41001
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:52:36,708 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:58026 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58026 dst: /127.0.0.1:41865
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:52:36,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c81b75d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:52:36,710 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f55aa3b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:52:36,711 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:52:36,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ff2063{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:52:36,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc0bdb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,STOPPED}
2024-11-07T12:52:36,712 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:52:36,713 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:52:36,713 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-551199049-172.17.0.2-1730983938264 (Datanode Uuid c56183c4-a159-4ce2-a559-2d72997be584) service to localhost/127.0.0.1:37737
2024-11-07T12:52:36,713 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:52:36,713 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data9/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:52:36,713 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data10/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:52:36,714 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:52:36,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42011 {}] regionserver.HRegion(8855): Flush requested on 68427f795afd4a50d7e5b045fbc4e8bb
2024-11-07T12:52:36,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68427f795afd4a50d7e5b045fbc4e8bb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-07T12:52:36,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/6ffa3290ff0642f38e16a34939172c4d is 1080, key is row0002/info:/1730983952699/Put/seqid=0
2024-11-07T12:52:36,742 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:36,742 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad.
2024-11-07T12:52:36,742 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741841_1024
2024-11-07T12:52:36,743 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]
2024-11-07T12:52:36,744 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:36,745 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad.
2024-11-07T12:52:36,745 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741842_1025
2024-11-07T12:52:36,745 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]
2024-11-07T12:52:36,747 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:58052 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741843_1026 to mirror 127.0.0.1:41001
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:52:36,747 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41001
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:36,748 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:58052 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-07T12:52:36,748 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad.
2024-11-07T12:52:36,748 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741843_1026
2024-11-07T12:52:36,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:58052 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58052 dst: /127.0.0.1:41865
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:52:36,748 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]
2024-11-07T12:52:36,749 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:36,750 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad.
2024-11-07T12:52:36,750 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741844_1027
2024-11-07T12:52:36,750 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]
2024-11-07T12:52:36,751 WARN [IPC Server handler 1 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-07T12:52:36,751 WARN [IPC Server handler 1 on default port 37737 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-07T12:52:36,751 WARN [IPC Server handler 1 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-07T12:52:36,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741845_1028 (size=10347)
2024-11-07T12:52:37,068 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:37,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/6ffa3290ff0642f38e16a34939172c4d
2024-11-07T12:52:37,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/6ffa3290ff0642f38e16a34939172c4d as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/6ffa3290ff0642f38e16a34939172c4d
2024-11-07T12:52:37,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/6ffa3290ff0642f38e16a34939172c4d, entries=5, sequenceid=11, filesize=10.1 K
2024-11-07T12:52:37,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 68427f795afd4a50d7e5b045fbc4e8bb in 450ms, sequenceid=11, compaction requested=false
2024-11-07T12:52:37,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68427f795afd4a50d7e5b045fbc4e8bb:
2024-11-07T12:52:37,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42011 {}] regionserver.HRegion(8855): Flush requested on 68427f795afd4a50d7e5b045fbc4e8bb
2024-11-07T12:52:37,343 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68427f795afd4a50d7e5b045fbc4e8bb 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB
2024-11-07T12:52:37,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/9b4a2a88ca58465eaca49c7915702585 is 1080, key is row0007/info:/1730983956723/Put/seqid=0
2024-11-07T12:52:37,350 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34709
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:37,350 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39286 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741846_1029 to mirror 127.0.0.1:34709
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:52:37,350 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad.
2024-11-07T12:52:37,350 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741846_1029
2024-11-07T12:52:37,350 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39286 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-07T12:52:37,350 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39286 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39286 dst: /127.0.0.1:41865
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:52:37,351 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]
2024-11-07T12:52:37,352 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-07T12:52:37,352 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad.
2024-11-07T12:52:37,352 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741847_1030
2024-11-07T12:52:37,353 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]
2024-11-07T12:52:37,354 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:37,354 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 2024-11-07T12:52:37,354 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741848_1031 2024-11-07T12:52:37,354 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:37,356 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41001 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:37,356 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39290 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741849_1032 to mirror 127.0.0.1:41001 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:37,356 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 2024-11-07T12:52:37,356 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741849_1032 2024-11-07T12:52:37,356 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39290 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:37,356 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39290 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39290 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
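The DataStreamer warnings above trace the HDFS client's write-pipeline recovery for a new block: each "Connection refused" during createBlockOutputStream gets the unreachable node reported as "datanode N(...) is bad", the block is abandoned, the node is added to an exclusion list ("Excluding datanode ..."), and a fresh block is requested that avoids the excluded nodes. The standalone Java sketch below models that loop; the DataNode record and allocateBlock helper are assumptions for illustration, not the hadoop-hdfs-client implementation.

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

/** Toy model of the client-side recovery loop seen in the DataStreamer log lines above. */
public class PipelineRecoverySketch {

    /** Hypothetical stand-in for a datanode address such as 127.0.0.1:34709. */
    record DataNode(String hostPort, boolean reachable) {}

    /** Pretend namenode: hands out a 2-node pipeline, skipping excluded nodes. */
    static List<DataNode> allocateBlock(List<DataNode> all, Set<DataNode> excluded) {
        List<DataNode> pipeline = new ArrayList<>();
        for (DataNode dn : all) {
            if (!excluded.contains(dn) && pipeline.size() < 2) {
                pipeline.add(dn);
            }
        }
        return pipeline;
    }

    public static void main(String[] args) {
        List<DataNode> cluster = List.of(
            new DataNode("127.0.0.1:41865", true),   // still alive in the log
            new DataNode("127.0.0.1:34709", false),  // "Connection refused" in the log
            new DataNode("127.0.0.1:44697", false),
            new DataNode("127.0.0.1:39857", false),
            new DataNode("127.0.0.1:41001", false));

        Set<DataNode> excluded = new LinkedHashSet<>();
        int attempt = 0;
        while (true) {
            List<DataNode> pipeline = allocateBlock(cluster, excluded);
            if (pipeline.size() < 2) {
                System.out.println("Failed to place enough replicas, still in need of "
                    + (2 - pipeline.size()) + " to reach 2");
                break;
            }
            attempt++;
            // "createBlockOutputStream": the first unreachable node makes this attempt fail.
            DataNode bad = pipeline.stream().filter(dn -> !dn.reachable()).findFirst().orElse(null);
            if (bad == null) {
                System.out.println("Pipeline established on attempt " + attempt + ": " + pipeline);
                break;
            }
            System.out.println("datanode " + pipeline.indexOf(bad) + "(" + bad.hostPort()
                + ") is bad. Abandoning block attempt " + attempt + ", excluding " + bad.hostPort());
            excluded.add(bad);  // mirrors "Excluding datanode ..." in the log
        }
    }
}

With only 127.0.0.1:41865 reachable, the loop excludes every other node and ends with the same "still in need of 1 to reach 2" outcome the namenode reports later in this section.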
2024-11-07T12:52:37,357 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:37,357 WARN [IPC Server handler 2 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:52:37,357 WARN [IPC Server handler 2 on default port 37737 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:52:37,357 WARN [IPC Server handler 2 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:52:37,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741850_1033 (size=12506) 2024-11-07T12:52:37,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/9b4a2a88ca58465eaca49c7915702585 2024-11-07T12:52:37,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/9b4a2a88ca58465eaca49c7915702585 as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/9b4a2a88ca58465eaca49c7915702585 2024-11-07T12:52:37,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/9b4a2a88ca58465eaca49c7915702585, entries=7, sequenceid=24, filesize=12.2 K 2024-11-07T12:52:37,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 68427f795afd4a50d7e5b045fbc4e8bb in 431ms, sequenceid=24, compaction requested=false 2024-11-07T12:52:37,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68427f795afd4a50d7e5b045fbc4e8bb: 2024-11-07T12:52:37,774 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-07T12:52:37,774 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:37,774 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/9b4a2a88ca58465eaca49c7915702585 because midkey is the same as first or last row 2024-11-07T12:52:38,021 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,707 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,707 WARN [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]] 2024-11-07T12:52:38,707 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C42011%2C1730983938993:(num 1730983954692) roll requested 2024-11-07T12:52:38,708 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42011%2C1730983938993.1730983958707 2024-11-07T12:52:38,711 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,711 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:38,711 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741851_1034 2024-11-07T12:52:38,712 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:38,713 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,713 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:38,713 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741852_1035 2024-11-07T12:52:38,713 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK] 2024-11-07T12:52:38,714 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,714 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 2024-11-07T12:52:38,714 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741853_1036 2024-11-07T12:52:38,715 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:38,716 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,716 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 
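The wal.FSHLog(529) warning earlier in this section ("HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") comes down to a simple guard: after a pipeline error the WAL writer compares the replicas left in its current pipeline against the minimum it tolerates and, when short, asks the roller for a new WAL file, which is what produces the "roll requested" and "New stream slow monitor" lines that follow. A minimal sketch of that check, using an assumed TOLERABLE_REPLICATION constant and shouldRequestRoll helper rather than the real FSHLog fields:

import java.util.List;

/** Minimal model of the low-replication check that triggers the WAL roll seen above. */
public class WalLowReplicationCheckSketch {

    static final int TOLERABLE_REPLICATION = 2;  // "expecting no less than 2 replicas"

    /** Returns true when the WAL should be rolled because the pipeline lost too many replicas. */
    static boolean shouldRequestRoll(List<String> currentPipeline) {
        int replicas = currentPipeline.size();
        if (replicas < TOLERABLE_REPLICATION) {
            System.out.println("HDFS pipeline error detected. Found " + replicas
                + " replicas but expecting no less than " + TOLERABLE_REPLICATION
                + " replicas. Requesting close of WAL. current pipeline: " + currentPipeline);
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        // Only one datanode left in the pipeline, as in the log line above.
        List<String> pipeline = List.of("127.0.0.1:41865");
        if (shouldRequestRoll(pipeline)) {
            // In the real server this becomes an AbstractWALRoller "roll requested" event;
            // here we just print the equivalent outcome.
            System.out.println("roll requested -> new WAL file will be created");
        }
    }
}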
2024-11-07T12:52:38,716 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741854_1037 2024-11-07T12:52:38,716 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:38,717 WARN [IPC Server handler 0 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:52:38,717 WARN [IPC Server handler 0 on default port 37737 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:52:38,717 WARN [IPC Server handler 0 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:52:38,719 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:38,719 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:38,720 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:38,720 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:38,720 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:38,720 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983954692 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983958707 2024-11-07T12:52:38,721 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40103:40103)] 2024-11-07T12:52:38,721 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 is not closed yet, will try archiving it next time 2024-11-07T12:52:38,721 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983954692 is not closed yet, will try archiving it next time 2024-11-07T12:52:38,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to 
blk_1073741840_1023 (size=24823) 2024-11-07T12:52:38,724 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983950684 to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs/db9ad1cb6cf9%2C42011%2C1730983938993.1730983950684 2024-11-07T12:52:38,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42011 {}] regionserver.HRegion(8855): Flush requested on 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:38,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68427f795afd4a50d7e5b045fbc4e8bb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-07T12:52:38,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/e13a17f63de0463a8937d493267659eb is 1079, key is tmprow/info:/1730983958759/Put/seqid=0 2024-11-07T12:52:38,766 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,766 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:38,766 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741856_1039 2024-11-07T12:52:38,767 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK] 2024-11-07T12:52:38,768 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,768 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 2024-11-07T12:52:38,768 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741857_1040 2024-11-07T12:52:38,769 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:38,770 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,770 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:38,770 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741858_1041 2024-11-07T12:52:38,771 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:38,772 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:38,772 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 2024-11-07T12:52:38,772 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741859_1042 2024-11-07T12:52:38,773 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:38,773 WARN [IPC Server handler 1 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:52:38,773 WARN [IPC Server handler 1 on default port 37737 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:52:38,773 WARN [IPC Server handler 1 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:52:38,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741860_1043 (size=6027) 2024-11-07T12:52:39,068 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:39,122 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 is not closed yet, will try archiving it next time 2024-11-07T12:52:39,177 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/e13a17f63de0463a8937d493267659eb 2024-11-07T12:52:39,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/e13a17f63de0463a8937d493267659eb as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/e13a17f63de0463a8937d493267659eb 2024-11-07T12:52:39,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/e13a17f63de0463a8937d493267659eb, entries=1, sequenceid=34, filesize=5.9 K 2024-11-07T12:52:39,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 68427f795afd4a50d7e5b045fbc4e8bb in 431ms, sequenceid=34, compaction requested=true 2024-11-07T12:52:39,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68427f795afd4a50d7e5b045fbc4e8bb: 2024-11-07T12:52:39,191 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-07T12:52:39,191 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:39,191 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/9b4a2a88ca58465eaca49c7915702585 because midkey is the same as first or last row 2024-11-07T12:52:39,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68427f795afd4a50d7e5b045fbc4e8bb:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:52:39,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:52:39,192 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:52:39,193 DEBUG 
[RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:52:39,193 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HStore(1541): 68427f795afd4a50d7e5b045fbc4e8bb/info is initiating minor compaction (all files) 2024-11-07T12:52:39,193 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 68427f795afd4a50d7e5b045fbc4e8bb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:39,193 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/6ffa3290ff0642f38e16a34939172c4d, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/9b4a2a88ca58465eaca49c7915702585, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/e13a17f63de0463a8937d493267659eb] into tmpdir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp, totalSize=28.2 K 2024-11-07T12:52:39,194 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6ffa3290ff0642f38e16a34939172c4d, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1730983952699 2024-11-07T12:52:39,194 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b4a2a88ca58465eaca49c7915702585, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1730983956723 2024-11-07T12:52:39,194 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.Compactor(225): Compacting e13a17f63de0463a8937d493267659eb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1730983958759 2024-11-07T12:52:39,207 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68427f795afd4a50d7e5b045fbc4e8bb#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:52:39,207 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/f57db30e45a547cda70dc68e5f8f353f is 1080, key is row0002/info:/1730983952699/Put/seqid=0 2024-11-07T12:52:39,209 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:39,209 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:39,209 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741861_1044 2024-11-07T12:52:39,210 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK] 2024-11-07T12:52:39,211 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:39,211 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 
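The split-policy lines that follow each flush (ConstantSizeRegionSplitPolicy "Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K" and StoreUtils "cannot split ... because midkey is the same as first or last row") amount to two checks: the summed store file size makes the region a split candidate, but the split is skipped when the candidate split point, the biggest file's midkey, equals that file's first or last row, since splitting there would leave one daughter empty. A toy sketch of that decision; the sizes match the 12:52:37 flush (10.1 K + 12.2 K = 22.3 K against 16.0 K), while the row keys other than row0002 are made up for illustration:

import java.util.List;

/** Toy model of the split decision logged by the split policy / StoreUtils lines above. */
public class SplitDecisionSketch {

    /** Hypothetical store file summary: size in KB plus its first, mid and last row keys. */
    record StoreFile(double sizeKb, String firstRow, String midKey, String lastRow) {}

    static void checkSplit(List<StoreFile> files, double sizeToCheckKb) {
        double sumSize = files.stream().mapToDouble(StoreFile::sizeKb).sum();
        System.out.printf("sumSize=%.1f K, sizeToCheck=%.1f K%n", sumSize, sizeToCheckKb);
        if (sumSize <= sizeToCheckKb) {
            System.out.println("region too small, no split");
            return;
        }
        // The largest file supplies the candidate split point (its midkey).
        StoreFile biggest = files.stream()
            .max((a, b) -> Double.compare(a.sizeKb(), b.sizeKb())).orElseThrow();
        if (biggest.midKey().equals(biggest.firstRow()) || biggest.midKey().equals(biggest.lastRow())) {
            // Matches "cannot split ... because midkey is the same as first or last row":
            // splitting at that key would leave one daughter region empty.
            System.out.println("cannot split: midkey equals first or last row");
        } else {
            System.out.println("split requested at " + biggest.midKey());
        }
    }

    public static void main(String[] args) {
        // Sizes as logged after the 12:52:37 flush; row keys (other than row0002) are hypothetical.
        checkSplit(List.of(
            new StoreFile(10.1, "row0002", "row0005", "row0009"),
            new StoreFile(12.2, "row0010", "row0010", "row0016")), 16.0);
    }
}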
2024-11-07T12:52:39,211 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741862_1045 2024-11-07T12:52:39,211 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:39,213 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41001 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:39,213 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39338 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741863_1046 to mirror 127.0.0.1:41001 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:39,213 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 
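The recurring IPC-server warnings from blockmanagement.BlockPlacementPolicyDefault and protocol.BlockStoragePolicy show the namenode side of the same outage: with most datanodes excluded it cannot find a second DISK target for a replication-factor-2 block, and a fallback pass that treats DISK as unavailable finds nothing selectable at all ("All required storage types are unavailable"). A small two-pass sketch of that selection, using assumed Target and usableTargets names rather than the Hadoop BlockPlacementPolicy API:

import java.util.List;
import java.util.Set;

/** Toy model of the "Failed to place enough replicas" warnings from the namenode above. */
public class ReplicaPlacementSketch {

    /** Hypothetical datanode descriptor: address, storage type, and whether it is excluded. */
    record Target(String hostPort, String storageType, boolean excluded) {}

    /** Counts usable targets for the requested storage type, honouring exclusions. */
    static long usableTargets(List<Target> cluster, String requiredType, Set<String> unavailableTypes) {
        if (unavailableTypes.contains(requiredType)) {
            return 0;  // "All required storage types are unavailable"
        }
        return cluster.stream()
            .filter(t -> !t.excluded() && t.storageType().equals(requiredType))
            .count();
    }

    public static void main(String[] args) {
        int replication = 2;  // the test writes with replication=2
        List<Target> cluster = List.of(
            new Target("127.0.0.1:41865", "DISK", false),  // only surviving datanode
            new Target("127.0.0.1:34709", "DISK", true),
            new Target("127.0.0.1:44697", "DISK", true),
            new Target("127.0.0.1:39857", "DISK", true),
            new Target("127.0.0.1:41001", "DISK", true));

        // Pass 1: no storage types ruled out yet.
        long found = usableTargets(cluster, "DISK", Set.of());
        if (found < replication) {
            System.out.println("Failed to place enough replicas, still in need of "
                + (replication - found) + " to reach " + replication + " (unavailableStorages=[])");
        }
        // Pass 2: retried with DISK marked unavailable, so nothing can be selected at all.
        long retry = usableTargets(cluster, "DISK", Set.of("DISK"));
        System.out.println("retry with unavailableStorages=[DISK] -> selectable targets: " + retry);
    }
}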
2024-11-07T12:52:39,213 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741863_1046 2024-11-07T12:52:39,213 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39338 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:39,214 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39338 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39338 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:39,214 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:39,216 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39857 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
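The "Got error, status=ERROR, ... ack with firstBadLink as 127.0.0.1:41001" IOException on the client pairs with the datanode's "Exception transferring block ... to mirror 127.0.0.1:41001" above: the head datanode accepts the write, fails to reach its mirror, and answers the client with an ERROR ack naming the first unreachable downstream node, which the client then marks bad and excludes. A compact sketch of that ack flow, with an assumed Ack record standing in for the DataTransferProtocol response:

import java.util.List;
import java.util.Optional;

/** Toy model of the head-datanode / mirror handshake behind the firstBadLink ack above. */
public class FirstBadLinkSketch {

    /** Hypothetical ack: OK, or ERROR plus the address of the first unreachable downstream node. */
    record Ack(boolean ok, String firstBadLink) {}

    /** The head datanode tries to open the rest of the pipeline (its "mirror" chain). */
    static Ack setupDownstream(List<String> downstream, List<String> reachable) {
        Optional<String> bad = downstream.stream().filter(dn -> !reachable.contains(dn)).findFirst();
        return bad.map(dn -> new Ack(false, dn))      // "ack with firstBadLink as <dn>"
                  .orElse(new Ack(true, ""));
    }

    public static void main(String[] args) {
        List<String> pipeline = List.of("127.0.0.1:41865", "127.0.0.1:41001");
        List<String> reachable = List.of("127.0.0.1:41865");  // 41001 refuses connections

        // Head node 41865 relays the setup to its mirror 41001 and reports the result upstream.
        Ack ack = setupDownstream(pipeline.subList(1, pipeline.size()), reachable);
        if (!ack.ok()) {
            System.out.println("Got error, status=ERROR, ack with firstBadLink as " + ack.firstBadLink());
            System.out.println("client reaction: mark " + ack.firstBadLink()
                + " as bad, abandon the block and retry with it excluded");
        }
    }
}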
2024-11-07T12:52:39,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39340 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741864_1047 to mirror 127.0.0.1:39857 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:39,216 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 2024-11-07T12:52:39,216 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741864_1047 2024-11-07T12:52:39,216 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39340 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:39,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39340 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39340 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:39,216 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:39,217 WARN [IPC Server handler 4 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:52:39,217 WARN [IPC Server handler 4 on default port 37737 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:52:39,217 WARN [IPC Server handler 4 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:52:39,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741865_1048 (size=17994) 2024-11-07T12:52:39,627 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/f57db30e45a547cda70dc68e5f8f353f as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f 2024-11-07T12:52:39,633 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 68427f795afd4a50d7e5b045fbc4e8bb/info of 68427f795afd4a50d7e5b045fbc4e8bb into f57db30e45a547cda70dc68e5f8f353f(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
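The compaction above follows the same write-under-.tmp-then-commit pattern as the flushes: the three eligible HFiles (10.1 K + 12.2 K + 5.9 K, the 28.2 K / 28880 bytes reported by the policy) are merged into a single file under the region's .tmp directory and only then moved into the info store, leaving the 17.6 K f57db30e45a547cda70dc68e5f8f353f file. A short sketch of that bookkeeping, using plain java.nio.file directories under a temp folder as a stand-in for the HRegionFileSystem commit:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.List;

/** Toy model of the compact-into-.tmp-then-commit sequence logged above. */
public class CompactionCommitSketch {

    public static void main(String[] args) throws IOException {
        // Candidate sizes as reported by the policy: 10.1 K + 12.2 K + 5.9 K = 28.2 K (28880 bytes).
        List<Double> candidateSizesKb = List.of(10.1, 12.2, 5.9);
        double totalKb = candidateSizesKb.stream().mapToDouble(Double::doubleValue).sum();
        System.out.printf("Selecting compaction from %d store files, totalSize=%.1f K%n",
            candidateSizesKb.size(), totalKb);

        // Stand-in directories for <region>/.tmp/info and <region>/info.
        Path region = Files.createTempDirectory("region");
        Path tmpDir = Files.createDirectories(region.resolve(".tmp/info"));
        Path storeDir = Files.createDirectories(region.resolve("info"));

        // 1) Write the merged output under .tmp (here just a placeholder payload).
        Path tmpFile = tmpDir.resolve("f57db30e45a547cda70dc68e5f8f353f");
        Files.writeString(tmpFile, "merged key/values would go here");

        // 2) Commit: move the finished file into the store directory in one step,
        //    mirroring "Committing .../.tmp/info/... as .../info/...".
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        System.out.println("Completed compaction of " + candidateSizesKb.size()
            + " file(s) into " + committed.getFileName());
    }
}

Writing into .tmp first keeps half-written output out of the live store directory; only a finished file is moved into place, which is why the earlier flush lines show the same Committing-as step before the file is counted in the store.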
2024-11-07T12:52:39,633 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 68427f795afd4a50d7e5b045fbc4e8bb: 2024-11-07T12:52:39,634 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb., storeName=68427f795afd4a50d7e5b045fbc4e8bb/info, priority=13, startTime=1730983959191; duration=0sec 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f because midkey is the same as first or last row 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f because midkey is the same as first or last row 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f because midkey is the same as first or last row 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:52:39,634 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68427f795afd4a50d7e5b045fbc4e8bb:info 2024-11-07T12:52:40,021 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:40,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42011 {}] regionserver.HRegion(8855): Flush requested on 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:40,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68427f795afd4a50d7e5b045fbc4e8bb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-07T12:52:40,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/d0aa12b1cc3c4891930307f48c5db453 is 1079, key is tmprow/info:/1730983960177/Put/seqid=0 2024-11-07T12:52:40,184 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:40,184 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 2024-11-07T12:52:40,184 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741866_1049 2024-11-07T12:52:40,185 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:40,186 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:40,186 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 2024-11-07T12:52:40,186 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741867_1050 2024-11-07T12:52:40,187 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:40,188 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:40,188 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:40,188 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741868_1051 2024-11-07T12:52:40,189 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK] 2024-11-07T12:52:40,190 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:40,190 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:40,190 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741869_1052 2024-11-07T12:52:40,190 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:40,191 WARN [IPC Server handler 0 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:52:40,191 WARN [IPC Server handler 0 on default port 37737 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:52:40,191 WARN [IPC Server handler 0 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:52:40,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741870_1053 (size=6027) 2024-11-07T12:52:40,328 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15b8af8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264):Failed to transfer BP-551199049-172.17.0.2-1730983938264:blk_1073741850_1033 to 127.0.0.1:39857 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:40,328 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f9c657b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264):Failed to transfer BP-551199049-172.17.0.2-1730983938264:blk_1073741845_1028 to 127.0.0.1:44697 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:52:40,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/d0aa12b1cc3c4891930307f48c5db453 2024-11-07T12:52:40,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/d0aa12b1cc3c4891930307f48c5db453 as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/d0aa12b1cc3c4891930307f48c5db453 2024-11-07T12:52:40,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/d0aa12b1cc3c4891930307f48c5db453, entries=1, sequenceid=45, filesize=5.9 K 2024-11-07T12:52:40,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 68427f795afd4a50d7e5b045fbc4e8bb in 430ms, sequenceid=45, compaction requested=false 2024-11-07T12:52:40,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68427f795afd4a50d7e5b045fbc4e8bb: 2024-11-07T12:52:40,608 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-07T12:52:40,608 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:40,609 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f because midkey is the same as first or last row 2024-11-07T12:52:40,724 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:40,725 WARN [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]] 2024-11-07T12:52:40,725 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C42011%2C1730983938993:(num 1730983958707) roll requested 2024-11-07T12:52:40,725 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42011%2C1730983938993.1730983960725 2024-11-07T12:52:40,728 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:40,728 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:40,728 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741871_1054 2024-11-07T12:52:40,728 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK] 2024-11-07T12:52:40,729 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:40,730 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 2024-11-07T12:52:40,730 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741872_1055 2024-11-07T12:52:40,730 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:40,732 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39364 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741873_1056 to mirror 127.0.0.1:39857 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:40,732 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39857 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:40,732 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39364 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-07T12:52:40,732 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 2024-11-07T12:52:40,732 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741873_1056 2024-11-07T12:52:40,732 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39364 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39364 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:40,733 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:40,734 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:40,734 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:40,734 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741874_1057 2024-11-07T12:52:40,735 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:40,735 WARN [IPC Server handler 0 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:52:40,735 WARN [IPC Server handler 0 on default port 37737 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:52:40,735 WARN [IPC Server handler 0 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:52:40,737 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:40,738 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:40,738 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:40,738 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:40,738 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:40,738 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983958707 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983960725 2024-11-07T12:52:40,739 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40103:40103)] 2024-11-07T12:52:40,739 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 is not closed yet, will try archiving it next time 2024-11-07T12:52:40,739 
DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983958707 is not closed yet, will try archiving it next time 2024-11-07T12:52:40,739 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983954692 to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs/db9ad1cb6cf9%2C42011%2C1730983938993.1730983954692 2024-11-07T12:52:40,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741855_1038 (size=13591) 2024-11-07T12:52:41,069 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:41,140 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 is not closed yet, will try archiving it next time 2024-11-07T12:52:41,328 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f9c657b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264):Failed to transfer BP-551199049-172.17.0.2-1730983938264:blk_1073741840_1023 to 127.0.0.1:44697 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:52:41,328 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15b8af8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264):Failed to transfer BP-551199049-172.17.0.2-1730983938264:blk_1073741860_1043 to 127.0.0.1:41001 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:41,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42011 {}] regionserver.HRegion(8855): Flush requested on 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:41,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68427f795afd4a50d7e5b045fbc4e8bb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-07T12:52:41,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/80b233387f0346a8bbbc22183e238509 is 1079, key is tmprow/info:/1730983961595/Put/seqid=0 2024-11-07T12:52:41,604 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41001 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:41,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39386 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741876_1059 to mirror 127.0.0.1:41001 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:41,604 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 2024-11-07T12:52:41,604 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741876_1059 2024-11-07T12:52:41,604 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39386 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:41,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39386 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39386 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:41,604 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:41,606 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:41,606 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:41,606 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741877_1060 2024-11-07T12:52:41,606 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK] 2024-11-07T12:52:41,607 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:41,607 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 2024-11-07T12:52:41,607 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741878_1061 2024-11-07T12:52:41,608 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:41,610 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44697 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:41,610 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39396 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741879_1062 to mirror 127.0.0.1:44697 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:41,610 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:41,610 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741879_1062 2024-11-07T12:52:41,610 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39396 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:41,610 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39396 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39396 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:52:41,610 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:41,611 WARN [IPC Server handler 3 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:52:41,611 WARN [IPC Server handler 3 on default port 37737 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:52:41,611 WARN [IPC Server handler 3 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:52:41,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741880_1063 (size=6027) 2024-11-07T12:52:42,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/80b233387f0346a8bbbc22183e238509 2024-11-07T12:52:42,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/80b233387f0346a8bbbc22183e238509 as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/80b233387f0346a8bbbc22183e238509 2024-11-07T12:52:42,022 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:42,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/80b233387f0346a8bbbc22183e238509, entries=1, sequenceid=55, filesize=5.9 K 2024-11-07T12:52:42,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 68427f795afd4a50d7e5b045fbc4e8bb in 431ms, sequenceid=55, compaction requested=true 2024-11-07T12:52:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68427f795afd4a50d7e5b045fbc4e8bb: 2024-11-07T12:52:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-07T12:52:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f because midkey is the same as first or last row 2024-11-07T12:52:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68427f795afd4a50d7e5b045fbc4e8bb:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:52:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:52:42,027 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:52:42,029 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:52:42,029 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HStore(1541): 68427f795afd4a50d7e5b045fbc4e8bb/info is initiating minor compaction (all files) 2024-11-07T12:52:42,029 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 68427f795afd4a50d7e5b045fbc4e8bb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 
2024-11-07T12:52:42,029 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/d0aa12b1cc3c4891930307f48c5db453, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/80b233387f0346a8bbbc22183e238509] into tmpdir=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp, totalSize=29.3 K 2024-11-07T12:52:42,029 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.Compactor(225): Compacting f57db30e45a547cda70dc68e5f8f353f, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1730983952699 2024-11-07T12:52:42,030 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.Compactor(225): Compacting d0aa12b1cc3c4891930307f48c5db453, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1730983960177 2024-11-07T12:52:42,030 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] compactions.Compactor(225): Compacting 80b233387f0346a8bbbc22183e238509, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1730983961595 2024-11-07T12:52:42,044 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68427f795afd4a50d7e5b045fbc4e8bb#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:52:42,045 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/cd806d12265c44f3aea3e8aea864ba0d is 1080, key is row0002/info:/1730983952699/Put/seqid=0 2024-11-07T12:52:42,046 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:42,047 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]) is bad. 2024-11-07T12:52:42,047 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741881_1064 2024-11-07T12:52:42,047 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34709,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK] 2024-11-07T12:52:42,049 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39857 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:42,049 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39422 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741882_1065 to mirror 127.0.0.1:39857 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:52:42,049 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 2024-11-07T12:52:42,049 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741882_1065 2024-11-07T12:52:42,049 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39422 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:42,049 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39422 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39422 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:42,050 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:42,052 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44697 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:42,052 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39434 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741883_1066 to mirror 127.0.0.1:44697 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:42,052 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:42,052 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741883_1066 2024-11-07T12:52:42,052 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39434 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:42,052 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:39434 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39434 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:42,053 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:42,054 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:42,054 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 
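The WARN lines above (and the two that immediately follow) repeat the same client-side recovery pattern for blk_1073741881_1064 through blk_1073741884_1067: the write pipeline fails with Connection refused or a firstBadLink ack, the block is abandoned, the offending datanode is excluded, and a new block is requested with the remaining nodes. The sketch below illustrates that exclude-and-retry loop under stated assumptions; the PipelineOpener hook and the way the failing node is reported are invented for the example, and this is not the hadoop-hdfs-client DataStreamer implementation.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    final class ExcludeAndRetrySketch {
        /** Hypothetical hook standing in for "create a block output stream to this pipeline". */
        interface PipelineOpener {
            /** Returns normally on success; throws with the failing node's address as the message. */
            void open(List<String> pipeline) throws IOException;
        }

        static boolean writeBlock(List<String> liveNodes, PipelineOpener opener, int maxAttempts) {
            Set<String> excluded = new HashSet<>();
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                List<String> pipeline = new ArrayList<>(liveNodes);
                pipeline.removeAll(excluded);
                if (pipeline.isEmpty()) {
                    return false;     // nothing left to try; the namenode side then warns
                }                     // "Failed to place enough replicas"
                try {
                    opener.open(pipeline);
                    return true;      // pipeline established
                } catch (IOException e) {
                    // Mirrors "Abandoning blk_..." and "Excluding datanode ..." above;
                    // here the bad node is assumed to be carried in the exception message.
                    excluded.add(e.getMessage());
                }
            }
            return false;
        }
    }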
2024-11-07T12:52:42,054 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741884_1067 2024-11-07T12:52:42,054 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:42,055 WARN [IPC Server handler 0 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:52:42,055 WARN [IPC Server handler 0 on default port 37737 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:52:42,055 WARN [IPC Server handler 0 on default port 37737 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:52:42,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741885_1068 (size=18097) 2024-11-07T12:52:42,465 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/cd806d12265c44f3aea3e8aea864ba0d as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/cd806d12265c44f3aea3e8aea864ba0d 2024-11-07T12:52:42,471 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 68427f795afd4a50d7e5b045fbc4e8bb/info of 68427f795afd4a50d7e5b045fbc4e8bb into cd806d12265c44f3aea3e8aea864ba0d(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
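Once every candidate datanode has been excluded, the IPC-server warnings above show why placement finally gives up: with replication=2 and only one reachable DISK storage left (the replica that lands on 127.0.0.1:41865 as blk_1073741885_1068), the policy still needs 1 more replica, and the fallback pass finds all required storage types unavailable. A small sketch of that bookkeeping, with hypothetical method names and the values taken from the warnings; it is not the BlockPlacementPolicyDefault source.

    // Illustrative bookkeeping for "still in need of 1 to reach 2" above.
    final class ReplicaPlacementSketch {
        static int stillNeeded(int requiredReplication, int alreadyChosen) {
            return Math.max(0, requiredReplication - alreadyChosen);
        }

        public static void main(String[] args) {
            int required = 2;   // replication=2 in the warning
            int chosen = 1;     // the one replica stored on 127.0.0.1:41865
            System.out.println("still in need of " + stillNeeded(required, chosen)
                + " to reach " + required);   // prints: still in need of 1 to reach 2
        }
    }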
2024-11-07T12:52:42,471 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 68427f795afd4a50d7e5b045fbc4e8bb: 2024-11-07T12:52:42,471 INFO [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb., storeName=68427f795afd4a50d7e5b045fbc4e8bb/info, priority=13, startTime=1730983962027; duration=0sec 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/cd806d12265c44f3aea3e8aea864ba0d because midkey is the same as first or last row 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/cd806d12265c44f3aea3e8aea864ba0d because midkey is the same as first or last row 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/cd806d12265c44f3aea3e8aea864ba0d because midkey is the same as first or last row 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:52:42,472 DEBUG [RS:0;db9ad1cb6cf9:42011-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68427f795afd4a50d7e5b045fbc4e8bb:info 2024-11-07T12:52:42,739 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:42,739 WARN [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-07T12:52:42,821 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:52:42,825 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:52:42,826 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:52:42,826 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:52:42,826 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:52:42,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6869cf12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:52:42,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39eb7ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:52:42,948 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c79190f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/java.io.tmpdir/jetty-localhost-33711-hadoop-hdfs-3_4_1-tests_jar-_-any-6070370718214037464/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:52:42,949 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70a14b1c{HTTP/1.1, (http/1.1)}{localhost:33711} 2024-11-07T12:52:42,949 INFO [Time-limited test {}] server.Server(415): Started @128218ms 2024-11-07T12:52:42,951 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:52:43,041 WARN [Thread-986 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:52:43,050 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf718804d701f1810 with lease ID 0x780a2b110f549ce8: from storage DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899 node DatanodeRegistration(127.0.0.1:40639, datanodeUuid=732f757f-6769-4bb3-bc12-fa7b328c287d, infoPort=45679, infoSecurePort=0, ipcPort=34229, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:43,050 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf718804d701f1810 with lease ID 0x780a2b110f549ce8: from storage DS-9af7146e-b0f6-435b-bc0d-d4eac6f6348f node DatanodeRegistration(127.0.0.1:40639, datanodeUuid=732f757f-6769-4bb3-bc12-fa7b328c287d, infoPort=45679, infoSecurePort=0, ipcPort=34229, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:52:43,069 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:43,328 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15b8af8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264):Failed to transfer BP-551199049-172.17.0.2-1730983938264:blk_1073741865_1048 to 127.0.0.1:44697 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:52:43,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741870_1053 (size=6027) 2024-11-07T12:52:44,022 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:44,328 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f9c657b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264):Failed to transfer BP-551199049-172.17.0.2-1730983938264:blk_1073741855_1038 to 127.0.0.1:41001 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:44,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741880_1063 (size=6027) 2024-11-07T12:52:44,740 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:45,069 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:46,022 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:46,328 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15b8af8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41865, datanodeUuid=4be56dc1-2e38-42da-b3d0-0feab1f74a00, infoPort=40103, infoSecurePort=0, ipcPort=41085, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264):Failed to transfer BP-551199049-172.17.0.2-1730983938264:blk_1073741885_1068 to 127.0.0.1:41001 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:46,740 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:47,070 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:48,023 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:48,740 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:48,927 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:52:49,070 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:49,167 ERROR [FSHLog-0-hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData-prefix:db9ad1cb6cf9,42273,1730983938946 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:49,167 WARN [FSHLog-0-hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData-prefix:db9ad1cb6cf9,42273,1730983938946 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:49,167 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C42273%2C1730983938946:(num 1730983939088) roll requested 2024-11-07T12:52:49,167 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42273%2C1730983938946.1730983969167 2024-11-07T12:52:49,171 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44697 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:49,171 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1974465270_22 at /127.0.0.1:53802 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741886_1069 to mirror 127.0.0.1:44697 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:49,171 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1974465270_22 at /127.0.0.1:53802 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-07T12:52:49,171 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:49,172 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741886_1069 2024-11-07T12:52:49,172 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1974465270_22 at /127.0.0.1:53802 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53802 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:49,172 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:49,173 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:49,174 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 2024-11-07T12:52:49,174 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741887_1070 2024-11-07T12:52:49,174 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:49,176 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41001 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:49,176 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1974465270_22 at /127.0.0.1:53804 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741888_1071 to mirror 127.0.0.1:41001 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:49,176 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 2024-11-07T12:52:49,176 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741888_1071 2024-11-07T12:52:49,177 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1974465270_22 at /127.0.0.1:53804 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-07T12:52:49,177 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1974465270_22 at /127.0.0.1:53804 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53804 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:49,177 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:49,181 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:49,181 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:49,182 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:49,182 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:49,182 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:49,182 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983969167 2024-11-07T12:52:49,182 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:49,182 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
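The master WAL roller above works through the roll-despite-failure sequence: a roll is requested, the sync runners are interrupted, the old WAL is rolled (entries=54, 26.68 KB) onto a new writer, and closing the old writer fails against the dead pipeline, so the missing trailer is treated as non-fatal and the old file is handed off to lease recovery (the "Recover lease on dfs file" lines that follow). The sketch below only mirrors that order of events with invented interfaces; the real logic lives in AbstractFSWAL/FSHLog and is not reproduced here.

    import java.io.IOException;

    final class WalRollSketch {
        interface WalWriter { void append(byte[] entry) throws IOException; void close() throws IOException; }
        interface WriterFactory { WalWriter create(String path) throws IOException; }
        interface LeaseRecovery { void recover(String path); }

        /** Roll to a new writer; a failure while closing the old one is non-fatal. */
        static WalWriter roll(WalWriter oldWriter, String oldPath, String newPath,
                              WriterFactory factory, LeaseRecovery leaseRecovery) throws IOException {
            WalWriter newWriter = factory.create(newPath);   // "Rolled WAL ... new WAL ....1730983969167"
            try {
                oldWriter.close();                           // may throw "All datanodes ... are bad"
            } catch (IOException e) {
                // "Failed to write trailer, non-fatal, continuing..." / "close old writer failed."
                leaseRecovery.recover(oldPath);              // "Recover lease on dfs file ..."
            }
            return newWriter;
        }
    }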
2024-11-07T12:52:49,182 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 2024-11-07T12:52:49,183 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45679:45679),(127.0.0.1/127.0.0.1:40103:40103)] 2024-11-07T12:52:49,183 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 is not closed yet, will try archiving it next time 2024-11-07T12:52:49,183 WARN [IPC Server handler 2 on default port 37737 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-07T12:52:49,183 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 after 1ms 2024-11-07T12:52:50,023 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:50,741 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:52,023 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:52,741 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:53,063 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7b1a5210 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:39857,null,null]) java.net.ConnectException: Call From db9ad1cb6cf9/172.17.0.2 to localhost:36397 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-07T12:52:53,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741833_1019 (size=455) 2024-11-07T12:52:53,184 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 after 4002ms 2024-11-07T12:52:53,715 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs/db9ad1cb6cf9%2C42011%2C1730983938993.1730983939397 2024-11-07T12:52:53,716 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983958707 to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs/db9ad1cb6cf9%2C42011%2C1730983938993.1730983958707 2024-11-07T12:52:54,024 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:54,741 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:55,045 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4aabcc5e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40639, datanodeUuid=732f757f-6769-4bb3-bc12-fa7b328c287d, infoPort=45679, infoSecurePort=0, ipcPort=34229, storageInfo=lv=-57;cid=testClusterID;nsid=1433542943;c=1730983938264):Failed to transfer BP-551199049-172.17.0.2-1730983938264:blk_1073741833_1019 to 127.0.0.1:44697 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:56,024 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
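
The RecoverLeaseFSUtils entries above show the WAL close path repeatedly asking the NameNode to recover the lease on the old writer's file ("Failed to recover lease, attempt=0 ... after 1ms", then "attempt=1 ... after 4002ms") while the write pipeline keeps reporting "All datanodes ... are bad". As a minimal sketch of that retry loop, the snippet below polls HDFS lease recovery on a path until the NameNode reports the file closed. It uses only the public DistributedFileSystem.recoverLease(Path) API; the timeout and backoff values are illustrative, and this is not the HBase utility itself.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Minimal sketch: poll lease recovery on an HDFS file until the NameNode closes it,
// roughly what the RecoverLeaseFSUtils log lines above are reporting.
public class LeaseRecoverySketch {
  public static boolean recoverLease(Configuration conf, Path walFile, long timeoutMs)
      throws IOException, InterruptedException {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the file has been closed on the NameNode.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      attempt++;
      // Back off between attempts, mirroring the "attempt=N ... after Nms" lines above.
      Thread.sleep(Math.min(4000L, 1000L * attempt));
    }
    return false;
  }
}
```
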
2024-11-07T12:52:56,671 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42011%2C1730983938993.1730983976671 2024-11-07T12:52:56,677 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,677 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,677 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,677 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,677 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,677 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983960725 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983976671 2024-11-07T12:52:56,678 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40103:40103),(127.0.0.1/127.0.0.1:45679:45679)] 2024-11-07T12:52:56,678 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983960725 is not closed yet, will try archiving it next time 2024-11-07T12:52:56,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741875_1058 (size=12911) 2024-11-07T12:52:56,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42011 {}] regionserver.HRegion(8855): Flush requested on 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:56,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 68427f795afd4a50d7e5b045fbc4e8bb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-07T12:52:56,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/beca6d12c3424d3193f18ca9da4bc227 is 1080, key is row0013/info:/1730983976679/Put/seqid=0 2024-11-07T12:52:56,689 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
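
The entry above reports a WAL roll completing ("Rolled WAL ... with entries=13 ...; new WAL ..."). For reference, the same operation can be requested from a client through the public Admin API; this sketch is only the client-side counterpart of that log line, not what the test harness itself invokes here, and the server name string is copied from the log purely for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: ask a region server to roll its WAL, producing the kind of
// "Rolled WAL ... new WAL ..." entry seen above.
public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // "hostname,port,startcode" form, taken from the log for illustration only.
      ServerName rs = ServerName.valueOf("db9ad1cb6cf9,42011,1730983938993");
      admin.rollWALWriter(rs); // the region server closes the current WAL and opens a new one
    }
  }
}
```
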
2024-11-07T12:52:56,689 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:56,689 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741891_1075 2024-11-07T12:52:56,690 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:56,691 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,691 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:40639,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 
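
The "Error Recovery for ... datanode 0(...) is bad", "Abandoning ..." and "Excluding datanode ..." lines above are the HDFS client's pipeline-recovery machinery reacting to unreachable datanodes. The client-side behaviour is governed by a small set of configuration keys; the sketch below shows them with illustrative values only, not a recommendation for this test.

```java
import org.apache.hadoop.conf.Configuration;

// Sketch: client-side knobs that govern how DataStreamer reacts when a pipeline
// datanode becomes unreachable, as in the "Excluding datanode ..." lines above.
public class PipelineFailureConfigSketch {
  public static Configuration configure() {
    Configuration conf = new Configuration();
    // Whether the client may replace a failed datanode in the write pipeline at all.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only replaces nodes for larger pipelines; ALWAYS and NEVER are the other policies.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Keep writing with the remaining nodes if no replacement datanode can be found.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```
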
2024-11-07T12:52:56,691 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741892_1076 2024-11-07T12:52:56,692 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:56,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741893_1077 (size=8190) 2024-11-07T12:52:56,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741893_1077 (size=8190) 2024-11-07T12:52:56,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/beca6d12c3424d3193f18ca9da4bc227 2024-11-07T12:52:56,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/beca6d12c3424d3193f18ca9da4bc227 as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/beca6d12c3424d3193f18ca9da4bc227 2024-11-07T12:52:56,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/beca6d12c3424d3193f18ca9da4bc227, entries=3, sequenceid=66, filesize=8.0 K 2024-11-07T12:52:56,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 68427f795afd4a50d7e5b045fbc4e8bb in 29ms, sequenceid=66, compaction requested=false 2024-11-07T12:52:56,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 68427f795afd4a50d7e5b045fbc4e8bb: 2024-11-07T12:52:56,711 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-07T12:52:56,711 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:52:56,711 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/cd806d12265c44f3aea3e8aea864ba0d because midkey is the same as first or last row 2024-11-07T12:52:56,741 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,742 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-07T12:52:56,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:52:56,900 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:52:56,900 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:56,900 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:56,900 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:56,900 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-07T12:52:56,900 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:52:56,900 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1912091167, stopped=false 2024-11-07T12:52:56,901 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db9ad1cb6cf9,42273,1730983938946 2024-11-07T12:52:56,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:56,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:56,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:52:56,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:56,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:56,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:52:56,903 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:52:56,903 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
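
The call stack above shows where the shutdown originates: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which closes the async connection, stops the RPC client, and then walks the master and region servers through the shutdown sequence logged below. As a minimal sketch of that JUnit lifecycle, assuming HBaseTestingUtil exposes the startMiniCluster()/shutdownMiniCluster() pair implied by the stack trace:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Sketch of the test lifecycle the call stack above is walking through:
// tearDown() shuts the minicluster down, producing the shutdown entries that follow.
public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster(); // HDFS + ZooKeeper + HBase master and region servers
  }

  @Test
  public void testSomethingAgainstTheCluster() throws Exception {
    // test body would exercise the cluster here
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster(); // same call as AbstractTestLogRolling.tearDown above
  }
}
```
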
2024-11-07T12:52:56,903 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:56,903 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:56,903 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'db9ad1cb6cf9,42011,1730983938993' ***** 2024-11-07T12:52:56,903 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:52:56,903 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db9ad1cb6cf9,43605,1730983939942' ***** 2024-11-07T12:52:56,903 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:52:56,903 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:52:56,904 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:56,904 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:56,904 INFO [RS:0;db9ad1cb6cf9:42011 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:52:56,904 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:52:56,904 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:52:56,904 INFO [RS:0;db9ad1cb6cf9:42011 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:52:56,904 INFO [RS:1;db9ad1cb6cf9:43605 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:52:56,904 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(3091): Received CLOSE for 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:56,904 INFO [RS:1;db9ad1cb6cf9:43605 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:52:56,904 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(959): stopping server db9ad1cb6cf9,43605,1730983939942 2024-11-07T12:52:56,904 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:52:56,904 INFO [RS:1;db9ad1cb6cf9:43605 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;db9ad1cb6cf9:43605. 
2024-11-07T12:52:56,904 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:56,904 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:56,904 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(959): stopping server db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:52:56,904 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(976): stopping server db9ad1cb6cf9,43605,1730983939942; all regions closed. 2024-11-07T12:52:56,904 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:52:56,904 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:52:56,904 INFO [RS:0;db9ad1cb6cf9:42011 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db9ad1cb6cf9:42011. 
2024-11-07T12:52:56,904 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:52:56,904 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:52:56,905 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:52:56,905 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:52:56,905 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 68427f795afd4a50d7e5b045fbc4e8bb, disabling compactions & flushes 2024-11-07T12:52:56,905 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:52:56,905 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:56,905 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:56,905 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. after waiting 0 ms 2024-11-07T12:52:56,905 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:56,905 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 68427f795afd4a50d7e5b045fbc4e8bb 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-07T12:52:56,905 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
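
The close sequence above flushes the remaining memstore of the test region (dataSize=9.46 KB) before the region is shut. The flush here is driven by the server's close path; for comparison, the equivalent client-initiated operation is a one-liner against the Admin API, sketched below with the table name taken from the log for illustration.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: force a memstore flush of a table from a client, the counterpart of the
// server-side "Flushing ... 1/1 column families" entries above.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}
```
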
2024-11-07T12:52:56,905 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:52:56,905 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,905 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-07T12:52:56,906 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,906 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 68427f795afd4a50d7e5b045fbc4e8bb=TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.} 2024-11-07T12:52:56,906 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 68427f795afd4a50d7e5b045fbc4e8bb 2024-11-07T12:52:56,906 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,906 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,906 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:52:56,906 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:52:56,906 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:52:56,906 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,906 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:52:56,906 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:52:56,906 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-07T12:52:56,906 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,907 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,907 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 2024-11-07T12:52:56,907 ERROR [FSHLog-0-hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201-prefix:db9ad1cb6cf9,42011,1730983938993.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,907 WARN [FSHLog-0-hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201-prefix:db9ad1cb6cf9,42011,1730983938993.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,907 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C42011%2C1730983938993.meta:.meta(num 1730983939786) roll requested 2024-11-07T12:52:56,907 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983976907.meta 2024-11-07T12:52:56,907 WARN [IPC Server handler 4 on default port 37737 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 has not been closed. Lease recovery is in progress. 
RecoveryId = 1078 for block blk_1073741837_1013 2024-11-07T12:52:56,907 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 after 0ms 2024-11-07T12:52:56,910 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,910 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 2024-11-07T12:52:56,910 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741894_1079 2024-11-07T12:52:56,911 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:56,912 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/a065ac0a91834991bf96a90d692102e5 is 1080, key is row0015/info:/1730983976684/Put/seqid=0 2024-11-07T12:52:56,913 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,913 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:56,913 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741896_1081 2024-11-07T12:52:56,914 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:56,915 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,915 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 
2024-11-07T12:52:56,915 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741897_1082 2024-11-07T12:52:56,915 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:56,918 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,918 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,919 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,919 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:56,919 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983976907.meta 2024-11-07T12:52:56,919 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,919 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:52:56,920 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta 2024-11-07T12:52:56,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741898_1083 (size=14660) 2024-11-07T12:52:56,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741898_1083 (size=14660) 2024-11-07T12:52:56,920 WARN [IPC Server handler 0 on default port 37737 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta has not been closed. Lease recovery is in progress. RecoveryId = 1084 for block blk_1073741834_1010 2024-11-07T12:52:56,920 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta after 0ms 2024-11-07T12:52:56,920 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/a065ac0a91834991bf96a90d692102e5 2024-11-07T12:52:56,925 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45679:45679),(127.0.0.1/127.0.0.1:40103:40103)] 2024-11-07T12:52:56,925 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta is not closed yet, will try archiving it next time 2024-11-07T12:52:56,927 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/.tmp/info/a065ac0a91834991bf96a90d692102e5 as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/a065ac0a91834991bf96a90d692102e5 2024-11-07T12:52:56,933 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/a065ac0a91834991bf96a90d692102e5, entries=9, sequenceid=78, filesize=14.3 K 2024-11-07T12:52:56,934 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, 
currentSize=0 B/0 for 68427f795afd4a50d7e5b045fbc4e8bb in 29ms, sequenceid=78, compaction requested=true 2024-11-07T12:52:56,940 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/6ffa3290ff0642f38e16a34939172c4d, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/9b4a2a88ca58465eaca49c7915702585, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/e13a17f63de0463a8937d493267659eb, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/d0aa12b1cc3c4891930307f48c5db453, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/80b233387f0346a8bbbc22183e238509] to archive 2024-11-07T12:52:56,941 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T12:52:56,943 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/6ffa3290ff0642f38e16a34939172c4d to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/6ffa3290ff0642f38e16a34939172c4d 2024-11-07T12:52:56,945 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/9b4a2a88ca58465eaca49c7915702585 to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/9b4a2a88ca58465eaca49c7915702585 2024-11-07T12:52:56,946 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f to 
hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/f57db30e45a547cda70dc68e5f8f353f 2024-11-07T12:52:56,948 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/e13a17f63de0463a8937d493267659eb to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/e13a17f63de0463a8937d493267659eb 2024-11-07T12:52:56,948 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/.tmp/info/dc6a787174d74ca1a1a08168c79ad0b7 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb./info:regioninfo/1730983940420/Put/seqid=0 2024-11-07T12:52:56,949 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/d0aa12b1cc3c4891930307f48c5db453 to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/d0aa12b1cc3c4891930307f48c5db453 2024-11-07T12:52:56,951 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/80b233387f0346a8bbbc22183e238509 to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/info/80b233387f0346a8bbbc22183e238509 2024-11-07T12:52:56,951 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44697 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
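
The flush commit and archival entries above follow the usual two-step pattern: the new HFile is written under the region's .tmp directory and then renamed into the store directory, so readers only ever see complete files; superseded compacted files are moved aside to the archive directory afterwards. The sketch below shows only the generic rename-to-commit pattern with hypothetical paths, not HRegionFileSystem's actual implementation.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Conceptual sketch of the commit step logged above: write to a temporary location,
// then rename into the store directory so the file appears atomically.
public class FlushCommitSketch {
  public static Path commitFlushedFile(Configuration conf, Path tmpFile, Path storeDir)
      throws IOException {
    FileSystem fs = tmpFile.getFileSystem(conf);
    Path committed = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, committed)) { // rename is atomic within an HDFS namespace
      throw new IOException("Failed to commit " + tmpFile + " to " + committed);
    }
    return committed;
  }
}
```
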
2024-11-07T12:52:56,951 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:33768 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6]'}, localName='127.0.0.1:41865', datanodeUuid='4be56dc1-2e38-42da-b3d0-0feab1f74a00', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741899_1085 to mirror 127.0.0.1:44697 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:56,951 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:56,951 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741899_1085 2024-11-07T12:52:56,951 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:33768 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:56,951 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=db9ad1cb6cf9:42273 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-07T12:52:56,951 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6ffa3290ff0642f38e16a34939172c4d=10347, 9b4a2a88ca58465eaca49c7915702585=12506, f57db30e45a547cda70dc68e5f8f353f=17994, e13a17f63de0463a8937d493267659eb=6027, d0aa12b1cc3c4891930307f48c5db453=6027, 80b233387f0346a8bbbc22183e238509=6027] 2024-11-07T12:52:56,951 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:33768 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:41865:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33768 dst: /127.0.0.1:41865 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:56,952 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:56,953 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,953 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 
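The DataStreamer warnings above show the write pipeline being rebuilt after each failure: the block is abandoned, the unreachable datanode (127.0.0.1:44697, then 127.0.0.1:41001) is excluded, and the client retries with the survivors. How aggressively the DFS client replaces failed pipeline nodes is controlled by the dfs.client.block.write.replace-datanode-on-failure settings; the sketch below only illustrates setting them on a client Configuration, with values chosen for illustration rather than taken from this test.

import org.apache.hadoop.conf.Configuration;

public final class PipelinePolicySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Keep pipeline recovery enabled and let the client fall back to the
    // surviving datanodes instead of failing the write outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
  }
}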
2024-11-07T12:52:56,953 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741900_1086 2024-11-07T12:52:56,953 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:56,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741901_1087 (size=7089) 2024-11-07T12:52:56,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741901_1087 (size=7089) 2024-11-07T12:52:56,960 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/default/TestLogRolling-testLogRollOnDatanodeDeath/68427f795afd4a50d7e5b045fbc4e8bb/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-07T12:52:56,960 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/.tmp/info/dc6a787174d74ca1a1a08168c79ad0b7 2024-11-07T12:52:56,961 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 2024-11-07T12:52:56,961 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 68427f795afd4a50d7e5b045fbc4e8bb: Waiting for close lock at 1730983976904Running coprocessor pre-close hooks at 1730983976904Disabling compacts and flushes for region at 1730983976904Disabling writes for close at 1730983976905 (+1 ms)Obtaining lock to block concurrent updates at 1730983976905Preparing flush snapshotting stores in 68427f795afd4a50d7e5b045fbc4e8bb at 1730983976905Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1730983976906 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. at 1730983976906Flushing 68427f795afd4a50d7e5b045fbc4e8bb/info: creating writer at 1730983976907 (+1 ms)Flushing 68427f795afd4a50d7e5b045fbc4e8bb/info: appending metadata at 1730983976911 (+4 ms)Flushing 68427f795afd4a50d7e5b045fbc4e8bb/info: closing flushed file at 1730983976911Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6735bcd9: reopening flushed file at 1730983976926 (+15 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 68427f795afd4a50d7e5b045fbc4e8bb in 29ms, sequenceid=78, compaction requested=true at 1730983976934 (+8 ms)Writing region close event to WAL at 1730983976956 (+22 ms)Running coprocessor post-close hooks at 1730983976961 (+5 ms)Closed at 1730983976961 2024-11-07T12:52:56,961 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1730983940055.68427f795afd4a50d7e5b045fbc4e8bb. 
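The "Region close journal" entry above packs every close step into one string of the form "<step> at <epochMillis> (+<delta> ms)". When eyeballing where a close spent its time, a small parser helps; this is a hypothetical log-analysis helper, not anything in HBase:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical helper: pulls (step, epochMillis, deltaMillis) triples out of
// a region close/flush journal string like the one logged above.
public final class CloseJournalSketch {
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?:\\s*\\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    String journal = args.length > 0 ? args[0]
        : "Waiting for close lock at 1730983976904"
        + "Disabling writes for close at 1730983976905 (+1 ms)";
    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      String delta = m.group(3) == null ? "0" : m.group(3);
      System.out.printf("%-45s t=%s +%sms%n", m.group(1).trim(), m.group(2), delta);
    }
  }
}

Applied to the journal above, the largest delta (+22 ms writing the region close event to the WAL) stands out immediately.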
2024-11-07T12:52:56,980 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/.tmp/ns/43daaf9a11174de3ada6538f8fe73e21 is 43, key is default/ns:d/1730983939843/Put/seqid=0 2024-11-07T12:52:56,982 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44697 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:56,982 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:59984 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741902_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data4]'}, localName='127.0.0.1:40639', datanodeUuid='732f757f-6769-4bb3-bc12-fa7b328c287d', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741902_1088 to mirror 127.0.0.1:44697 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:56,983 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40639,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 
2024-11-07T12:52:56,983 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741902_1088 2024-11-07T12:52:56,983 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:59984 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741902_1088] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:56,983 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:59984 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741902_1088] {}] datanode.DataXceiver(331): 127.0.0.1:40639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59984 dst: /127.0.0.1:40639 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:56,983 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:56,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741903_1089 (size=5153) 2024-11-07T12:52:56,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741903_1089 (size=5153) 2024-11-07T12:52:56,988 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/.tmp/ns/43daaf9a11174de3ada6538f8fe73e21 2024-11-07T12:52:57,016 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/.tmp/table/b84e8637c2f44a5ab9d641db2d48ebdf is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1730983940431/Put/seqid=0 2024-11-07T12:52:57,018 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:57,018 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK], DatanodeInfoWithStorage[127.0.0.1:40639,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK]) is bad. 2024-11-07T12:52:57,018 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741904_1090 2024-11-07T12:52:57,019 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39857,DS-b92cddd8-6d84-42d7-884f-ee397d4786cd,DISK] 2024-11-07T12:52:57,020 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:57,020 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK], DatanodeInfoWithStorage[127.0.0.1:41865,DS-d9b46f76-9e07-4c0f-bd92-3af1595ff7f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK]) is bad. 
2024-11-07T12:52:57,020 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741905_1091 2024-11-07T12:52:57,021 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41001,DS-f579e5a1-55af-4789-8bb7-a3693c165127,DISK] 2024-11-07T12:52:57,023 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44697 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:52:57,024 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-551199049-172.17.0.2-1730983938264:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40639,DS-a4b2ed16-21f4-4448-b4cc-484a86cd4899,DISK], DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK]) is bad. 2024-11-07T12:52:57,024 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-551199049-172.17.0.2-1730983938264:blk_1073741906_1092 2024-11-07T12:52:57,024 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:59994 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741906_1092] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data4]'}, localName='127.0.0.1:40639', datanodeUuid='732f757f-6769-4bb3-bc12-fa7b328c287d', xmitsInProgress=0}:Exception transferring block BP-551199049-172.17.0.2-1730983938264:blk_1073741906_1092 to mirror 127.0.0.1:44697 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:52:57,024 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:59994 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741906_1092] {}] datanode.BlockReceiver(316): Block 1073741906 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-07T12:52:57,024 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46262148_22 at /127.0.0.1:59994 [Receiving block BP-551199049-172.17.0.2-1730983938264:blk_1073741906_1092] {}] datanode.DataXceiver(331): 127.0.0.1:40639:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59994 dst: /127.0.0.1:40639 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
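Every failed pipeline attempt above ends in the same java.net.ConnectException against 127.0.0.1:44697, which is the datanode this test stops deliberately. A plain socket probe reproduces the symptom outside the test harness; this is an illustrative diagnostic, not part of the test code:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public final class PortProbe {
  public static void main(String[] args) throws Exception {
    String host = args.length > 0 ? args[0] : "127.0.0.1";
    int port = args.length > 1 ? Integer.parseInt(args[1]) : 44697;
    try (Socket s = new Socket()) {
      s.connect(new InetSocketAddress(host, port), 2000); // 2 s timeout
      System.out.println(host + ":" + port + " is reachable");
    } catch (IOException e) {
      // A stopped datanode shows up exactly as in the log: Connection refused.
      System.out.println(host + ":" + port + " unreachable: " + e.getMessage());
    }
  }
}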
2024-11-07T12:52:57,024 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44697,DS-1aa386ef-ea55-4893-8b64-bae689b22210,DISK] 2024-11-07T12:52:57,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741907_1093 (size=5424) 2024-11-07T12:52:57,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741907_1093 (size=5424) 2024-11-07T12:52:57,035 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/.tmp/table/b84e8637c2f44a5ab9d641db2d48ebdf 2024-11-07T12:52:57,036 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T12:52:57,036 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T12:52:57,043 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/.tmp/info/dc6a787174d74ca1a1a08168c79ad0b7 as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/info/dc6a787174d74ca1a1a08168c79ad0b7 2024-11-07T12:52:57,053 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/info/dc6a787174d74ca1a1a08168c79ad0b7, entries=10, sequenceid=11, filesize=6.9 K 2024-11-07T12:52:57,054 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/.tmp/ns/43daaf9a11174de3ada6538f8fe73e21 as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/ns/43daaf9a11174de3ada6538f8fe73e21 2024-11-07T12:52:57,072 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/ns/43daaf9a11174de3ada6538f8fe73e21, entries=2, sequenceid=11, filesize=5.0 K 2024-11-07T12:52:57,073 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/.tmp/table/b84e8637c2f44a5ab9d641db2d48ebdf as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/table/b84e8637c2f44a5ab9d641db2d48ebdf 2024-11-07T12:52:57,080 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.1730983960725 to 
hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs/db9ad1cb6cf9%2C42011%2C1730983938993.1730983960725 2024-11-07T12:52:57,080 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/table/b84e8637c2f44a5ab9d641db2d48ebdf, entries=2, sequenceid=11, filesize=5.3 K 2024-11-07T12:52:57,082 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 176ms, sequenceid=11, compaction requested=false 2024-11-07T12:52:57,088 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-07T12:52:57,088 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:52:57,089 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:52:57,089 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730983976906Running coprocessor pre-close hooks at 1730983976906Disabling compacts and flushes for region at 1730983976906Disabling writes for close at 1730983976906Obtaining lock to block concurrent updates at 1730983976906Preparing flush snapshotting stores in 1588230740 at 1730983976906Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1730983976907 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1730983976926 (+19 ms)Flushing 1588230740/info: creating writer at 1730983976926Flushing 1588230740/info: appending metadata at 1730983976947 (+21 ms)Flushing 1588230740/info: closing flushed file at 1730983976948 (+1 ms)Flushing 1588230740/ns: creating writer at 1730983976965 (+17 ms)Flushing 1588230740/ns: appending metadata at 1730983976980 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1730983976980Flushing 1588230740/table: creating writer at 1730983976994 (+14 ms)Flushing 1588230740/table: appending metadata at 1730983977015 (+21 ms)Flushing 1588230740/table: closing flushed file at 1730983977015Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35592bed: reopening flushed file at 1730983977042 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a14fe65: reopening flushed file at 1730983977053 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51b1c392: reopening flushed file at 1730983977072 (+19 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 176ms, sequenceid=11, compaction requested=false at 1730983977082 (+10 ms)Writing region close event to WAL at 1730983977083 (+1 ms)Running coprocessor post-close hooks at 1730983977088 (+5 ms)Closed at 1730983977089 (+1 ms) 2024-11-07T12:52:57,089 DEBUG 
[RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:52:57,106 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(976): stopping server db9ad1cb6cf9,42011,1730983938993; all regions closed. 2024-11-07T12:52:57,109 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:57,109 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:57,109 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:57,109 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:57,109 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:52:57,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741895_1080 (size=825) 2024-11-07T12:52:57,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741895_1080 (size=825) 2024-11-07T12:52:57,260 INFO [regionserver/db9ad1cb6cf9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:52:57,355 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T12:52:57,355 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T12:52:58,021 INFO [regionserver/db9ad1cb6cf9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:52:58,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741875_1058 (size=12911) 2024-11-07T12:52:59,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-07T12:53:00,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:53:00,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T12:53:00,174 INFO [master/db9ad1cb6cf9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-07T12:53:00,174 INFO [master/db9ad1cb6cf9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
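The HRegionFileSystem "Committing .../.tmp/... as .../..." entries above show the two-phase flush commit: the flusher writes the HFile under the store's .tmp directory, then renames it into the store directory so readers only ever see complete files. A minimal sketch of that commit step with the plain Hadoop FileSystem API; paths are taken from the log, but the class and flow here are illustrative rather than the HRegionFileSystem implementation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/data/hbase/meta/1588230740/.tmp/info/dc6a787174d74ca1a1a08168c79ad0b7");
    Path dst = new Path("/data/hbase/meta/1588230740/info", tmp.getName());
    fs.mkdirs(dst.getParent());
    // The rename is a metadata-only operation within one HDFS namespace, so the
    // flushed file appears in the store directory atomically.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit failed: " + tmp + " -> " + dst);
    }
    System.out.println("committed " + dst);
  }
}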
2024-11-07T12:53:00,908 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 after 4001ms 2024-11-07T12:53:00,921 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta after 4001ms 2024-11-07T12:53:01,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:53:01,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741835_1011 (size=393) 2024-11-07T12:53:01,907 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-07T12:53:01,909 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs 2024-11-07T12:53:01,909 INFO [RS:1;db9ad1cb6cf9:43605 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C43605%2C1730983939942:(num 1730983940156) 2024-11-07T12:53:01,909 DEBUG [RS:1;db9ad1cb6cf9:43605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:53:01,909 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:53:01,909 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:53:01,909 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.ChoreService(370): Chore service for: regionserver/db9ad1cb6cf9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-07T12:53:01,909 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:53:01,909 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:53:01,909 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T12:53:01,909 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:53:01,909 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:53:01,910 INFO [RS:1;db9ad1cb6cf9:43605 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43605 2024-11-07T12:53:01,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9ad1cb6cf9,43605,1730983939942 2024-11-07T12:53:01,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:53:01,912 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:53:01,913 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9ad1cb6cf9,43605,1730983939942] 2024-11-07T12:53:01,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:01,914 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db9ad1cb6cf9,43605,1730983939942 already deleted, retry=false 2024-11-07T12:53:01,914 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db9ad1cb6cf9,43605,1730983939942 expired; onlineServers=1 2024-11-07T12:53:01,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:01,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:01,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:01,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:01,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:01,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:01,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:01,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:02,013 INFO [RS:1;db9ad1cb6cf9:43605 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:53:02,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:02,013 INFO [RS:1;db9ad1cb6cf9:43605 {}] regionserver.HRegionServer(1031): Exiting; stopping=db9ad1cb6cf9,43605,1730983939942; zookeeper connection closed. 2024-11-07T12:53:02,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43605-0x1001a4cd6100002, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:02,014 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6c252aab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6c252aab 2024-11-07T12:53:02,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:53:02,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:53:02,110 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-07T12:53:02,113 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs 2024-11-07T12:53:02,113 INFO [RS:0;db9ad1cb6cf9:42011 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C42011%2C1730983938993.meta:.meta(num 1730983976907) 2024-11-07T12:53:02,113 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,114 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,114 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,114 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,114 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741890_1074 (size=14682) 2024-11-07T12:53:02,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741890_1074 (size=14682) 2024-11-07T12:53:02,118 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs 2024-11-07T12:53:02,118 INFO [RS:0;db9ad1cb6cf9:42011 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C42011%2C1730983938993:(num 1730983976671) 2024-11-07T12:53:02,118 DEBUG [RS:0;db9ad1cb6cf9:42011 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:53:02,118 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.LeaseManager(133): Closed leases 
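The RecoverLeaseFSUtils messages and the isFileClosed stack trace above come from WAL lease recovery during shutdown: the client asks the NameNode to recover the lease on the old WAL, then polls whether the file is closed, retrying a few seconds apart. A simplified sketch of that loop using the public DistributedFileSystem API; the timeouts are illustrative and this is not the RecoverLeaseFSUtils code itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  public static boolean recover(DistributedFileSystem dfs, Path wal) throws Exception {
    long deadline = System.currentTimeMillis() + 60_000L;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(wal)) {
        return true;                 // lease released, file already closed
      }
      if (dfs.isFileClosed(wal)) {   // the probe seen in the stack trace above
        return true;
      }
      Thread.sleep(4_000L);          // roughly the ~4 s attempt spacing logged
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Path wal = new Path(args[0]);
    DistributedFileSystem dfs =
        (DistributedFileSystem) wal.getFileSystem(new Configuration());
    System.out.println("recovered=" + recover(dfs, wal));
  }
}

In this run the FileNotFoundException thrown by isFileClosed is consistent with the WAL having already been moved to oldWALs between attempts, at which point there is nothing left to recover.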
2024-11-07T12:53:02,118 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:53:02,118 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.ChoreService(370): Chore service for: regionserver/db9ad1cb6cf9:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T12:53:02,118 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:53:02,118 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-07T12:53:02,119 INFO [RS:0;db9ad1cb6cf9:42011 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42011 2024-11-07T12:53:02,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9ad1cb6cf9,42011,1730983938993 2024-11-07T12:53:02,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:53:02,121 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:53:02,122 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9ad1cb6cf9,42011,1730983938993] 2024-11-07T12:53:02,124 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db9ad1cb6cf9,42011,1730983938993 already deleted, retry=false 2024-11-07T12:53:02,124 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db9ad1cb6cf9,42011,1730983938993 expired; onlineServers=0 2024-11-07T12:53:02,124 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db9ad1cb6cf9,42273,1730983938946' ***** 2024-11-07T12:53:02,124 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:53:02,124 INFO [M:0;db9ad1cb6cf9:42273 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:53:02,124 INFO [M:0;db9ad1cb6cf9:42273 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:53:02,124 DEBUG [M:0;db9ad1cb6cf9:42273 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:53:02,124 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:53:02,124 DEBUG [M:0;db9ad1cb6cf9:42273 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:53:02,124 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983939167 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983939167,5,FailOnTimeoutGroup] 2024-11-07T12:53:02,124 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983939168 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983939168,5,FailOnTimeoutGroup] 2024-11-07T12:53:02,125 INFO [M:0;db9ad1cb6cf9:42273 {}] hbase.ChoreService(370): Chore service for: master/db9ad1cb6cf9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:53:02,125 INFO [M:0;db9ad1cb6cf9:42273 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:53:02,125 DEBUG [M:0;db9ad1cb6cf9:42273 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:53:02,125 INFO [M:0;db9ad1cb6cf9:42273 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:53:02,125 INFO [M:0;db9ad1cb6cf9:42273 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:53:02,125 INFO [M:0;db9ad1cb6cf9:42273 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:53:02,125 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:53:02,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:53:02,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:02,126 DEBUG [M:0;db9ad1cb6cf9:42273 {}] zookeeper.ZKUtil(347): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:53:02,126 WARN [M:0;db9ad1cb6cf9:42273 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:53:02,127 INFO [M:0;db9ad1cb6cf9:42273 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/.lastflushedseqids 2024-11-07T12:53:02,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741908_1094 (size=130) 2024-11-07T12:53:02,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741908_1094 (size=130) 2024-11-07T12:53:02,133 INFO [M:0;db9ad1cb6cf9:42273 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:53:02,133 INFO [M:0;db9ad1cb6cf9:42273 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:53:02,133 DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:53:02,133 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:02,133 DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:02,133 DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:53:02,133 DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:02,133 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-07T12:53:02,149 DEBUG [M:0;db9ad1cb6cf9:42273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/150d601ac3f142819a28841bc286b9a3 is 82, key is hbase:meta,,1/info:regioninfo/1730983939826/Put/seqid=0 2024-11-07T12:53:02,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741909_1095 (size=5672) 2024-11-07T12:53:02,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741909_1095 (size=5672) 2024-11-07T12:53:02,155 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/150d601ac3f142819a28841bc286b9a3 2024-11-07T12:53:02,175 DEBUG [M:0;db9ad1cb6cf9:42273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8258099986f24c71958f47940244f19d is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730983940435/Put/seqid=0 2024-11-07T12:53:02,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741910_1096 (size=6256) 2024-11-07T12:53:02,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741910_1096 (size=6256) 2024-11-07T12:53:02,181 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8258099986f24c71958f47940244f19d 2024-11-07T12:53:02,186 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8258099986f24c71958f47940244f19d 2024-11-07T12:53:02,201 DEBUG [M:0;db9ad1cb6cf9:42273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e860ad84d5974fe792cec599a75eff62 is 69, key is db9ad1cb6cf9,42011,1730983938993/rs:state/1730983939241/Put/seqid=0 2024-11-07T12:53:02,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741911_1097 (size=5224) 2024-11-07T12:53:02,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741911_1097 (size=5224) 2024-11-07T12:53:02,207 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e860ad84d5974fe792cec599a75eff62 2024-11-07T12:53:02,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:02,222 INFO [RS:0;db9ad1cb6cf9:42011 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:53:02,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42011-0x1001a4cd6100001, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:02,222 INFO [RS:0;db9ad1cb6cf9:42011 {}] regionserver.HRegionServer(1031): Exiting; stopping=db9ad1cb6cf9,42011,1730983938993; zookeeper connection closed. 2024-11-07T12:53:02,223 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6c81ac21 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6c81ac21 2024-11-07T12:53:02,223 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-07T12:53:02,226 DEBUG [M:0;db9ad1cb6cf9:42273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/133579979e6d4e21a60073c7a4dd3a83 is 52, key is load_balancer_on/state:d/1730983939925/Put/seqid=0 2024-11-07T12:53:02,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741912_1098 (size=5056) 2024-11-07T12:53:02,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741912_1098 (size=5056) 2024-11-07T12:53:02,231 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/133579979e6d4e21a60073c7a4dd3a83 2024-11-07T12:53:02,237 DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/150d601ac3f142819a28841bc286b9a3 as 
hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/150d601ac3f142819a28841bc286b9a3 2024-11-07T12:53:02,242 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/150d601ac3f142819a28841bc286b9a3, entries=8, sequenceid=60, filesize=5.5 K 2024-11-07T12:53:02,243 DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8258099986f24c71958f47940244f19d as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8258099986f24c71958f47940244f19d 2024-11-07T12:53:02,248 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8258099986f24c71958f47940244f19d 2024-11-07T12:53:02,248 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8258099986f24c71958f47940244f19d, entries=6, sequenceid=60, filesize=6.1 K 2024-11-07T12:53:02,249 DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e860ad84d5974fe792cec599a75eff62 as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e860ad84d5974fe792cec599a75eff62 2024-11-07T12:53:02,254 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e860ad84d5974fe792cec599a75eff62, entries=2, sequenceid=60, filesize=5.1 K 2024-11-07T12:53:02,255 DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/133579979e6d4e21a60073c7a4dd3a83 as hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/133579979e6d4e21a60073c7a4dd3a83 2024-11-07T12:53:02,259 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/133579979e6d4e21a60073c7a4dd3a83, entries=1, sequenceid=60, filesize=4.9 K 2024-11-07T12:53:02,261 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false 2024-11-07T12:53:02,262 INFO [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:53:02,262 DEBUG [M:0;db9ad1cb6cf9:42273 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730983982133Disabling compacts and flushes for region at 1730983982133Disabling writes for close at 1730983982133Obtaining lock to block concurrent updates at 1730983982133Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730983982133Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1730983982134 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730983982134Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730983982134Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730983982149 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730983982149Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730983982160 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730983982175 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730983982175Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730983982186 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730983982201 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730983982201Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730983982212 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730983982226 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730983982226Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24cf7391: reopening flushed file at 1730983982236 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8707b7f: reopening flushed file at 1730983982242 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c9ced74: reopening flushed file at 1730983982248 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5888019c: reopening flushed file at 1730983982254 (+6 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false at 1730983982261 (+7 ms)Writing region close event to WAL at 1730983982262 (+1 ms)Closed at 1730983982262 2024-11-07T12:53:02,262 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,263 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,263 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,263 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,263 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:02,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41865 is added to blk_1073741889_1072 (size=1045) 2024-11-07T12:53:02,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40639 is added to blk_1073741889_1072 (size=1045) 2024-11-07T12:53:02,485 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:53:02,499 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:02,499 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:02,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:02,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:02,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:02,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:02,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:02,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:02,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:02,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:03,066 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4acd332c {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-551199049-172.17.0.2-1730983938264:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:39857,null,null]) java.net.ConnectException: Call From db9ad1cb6cf9/172.17.0.2 to localhost:36397 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-07T12:53:03,191 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/WALs/db9ad1cb6cf9,42273,1730983938946/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/oldWALs/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 2024-11-07T12:53:03,194 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/MasterData/oldWALs/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088 to hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/oldWALs/db9ad1cb6cf9%2C42273%2C1730983938946.1730983939088$masterlocalwal$ 2024-11-07T12:53:03,194 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-07T12:53:03,194 INFO [M:0;db9ad1cb6cf9:42273 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:53:03,194 INFO [M:0;db9ad1cb6cf9:42273 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42273 2024-11-07T12:53:03,194 INFO [M:0;db9ad1cb6cf9:42273 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:53:03,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:03,297 INFO [M:0;db9ad1cb6cf9:42273 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:53:03,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42273-0x1001a4cd6100000, quorum=127.0.0.1:58729, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:03,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c79190f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:03,300 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70a14b1c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:03,300 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:03,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39eb7ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:03,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6869cf12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:03,301 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:53:03,301 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
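The Close-WAL-Writer warnings above show RecoverLeaseFSUtils polling DistributedFileSystem.isFileClosed after a lease-recovery request; the invocation fails here only because the DFSClient had already been closed during shutdown ("Filesystem closed"). The following is a minimal sketch of that recoverLease/isFileClosed polling pattern using only public DistributedFileSystem calls; the method name, timeout and 1-second retry interval are assumptions, and this is not the actual RecoverLeaseFSUtils implementation.

```java
// A minimal sketch of the lease-recovery pattern visible in the stack traces
// above: ask the NameNode to recover the lease, then poll isFileClosed() until
// the file is reported closed. Not the actual RecoverLeaseFSUtils code; the
// timeout and 1-second retry interval are assumptions for illustration.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    boolean recovered = dfs.recoverLease(wal);   // true if the file is already closed
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                       // assumed retry interval
      recovered = dfs.isFileClosed(wal);         // the call that throws above once the client is closed
    }
    return recovered;
  }
}
```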
2024-11-07T12:53:03,301 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:53:03,301 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-551199049-172.17.0.2-1730983938264 (Datanode Uuid 732f757f-6769-4bb3-bc12-fa7b328c287d) service to localhost/127.0.0.1:37737 2024-11-07T12:53:03,301 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3296f3cf {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-551199049-172.17.0.2-1730983938264:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:39857,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:36397 , LocalHost:localPort db9ad1cb6cf9/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-07T12:53:03,302 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3296f3cf {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-551199049-172.17.0.2-1730983938264:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40639,null,null], DatanodeInfoWithStorage[127.0.0.1:39857,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-551199049-172.17.0.2-1730983938264 2024-11-07T12:53:03,302 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data3/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:03,302 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3296f3cf {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40639,null,null]) java.io.IOException: No block pool offer service for bpid=BP-551199049-172.17.0.2-1730983938264 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
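The InterruptedIOException above spells out the IPC client's connection retry policy, RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS). For reference, here is a sketch of constructing that same policy via Hadoop's RetryPolicies factory; only the 10 retries and 1000 ms sleep come from the log, while the class and variable names are illustrative.

```java
// Sketch only: the retry policy named in the InterruptedIOException above,
// built via Hadoop's public RetryPolicies factory. The class and variable
// names are illustrative; only the 10 retries / 1000 ms sleep come from the log.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class IpcRetryPolicySketch {
  public static void main(String[] args) {
    RetryPolicy connectRetry =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1000, TimeUnit.MILLISECONDS);
    System.out.println(connectRetry);
  }
}
```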
2024-11-07T12:53:03,302 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3296f3cf {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39857,null,null]) java.io.IOException: No block pool offer service for bpid=BP-551199049-172.17.0.2-1730983938264 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:03,302 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data4/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:03,302 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3296f3cf {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40639,null,null], DatanodeInfoWithStorage[127.0.0.1:39857,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-551199049-172.17.0.2-1730983938264:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:40639,null,null], DatanodeInfoWithStorage[127.0.0.1:39857,null,null]] 2024-11-07T12:53:03,302 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:53:03,304 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@730725ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:03,305 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20e1b523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:03,305 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:03,305 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d0f4a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:03,305 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ddd8f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:03,306 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:53:03,306 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:53:03,306 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:53:03,306 WARN [BP-551199049-172.17.0.2-1730983938264 heartbeating to localhost/127.0.0.1:37737 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-551199049-172.17.0.2-1730983938264 (Datanode Uuid 4be56dc1-2e38-42da-b3d0-0feab1f74a00) service to localhost/127.0.0.1:37737 2024-11-07T12:53:03,307 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data5/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:03,307 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/cluster_2fc7b0bb-4cf0-fda2-6136-fd5400d22da3/data/data6/current/BP-551199049-172.17.0.2-1730983938264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:03,307 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:53:03,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c00ef51{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:53:03,314 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73b9709e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:03,314 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:03,314 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42b52d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:03,314 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@142d24a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:03,322 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:53:03,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:53:03,358 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f3710bf44c0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:34841 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:37737 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f3710bf44c0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37737 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37737 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:37737 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37737 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34841 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37737 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37737 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37737 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37737 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37737 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37737 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=83 (was 80) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7963 (was 8574) 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=83, ProcessCount=11, AvailableMemoryMB=7963 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.log.dir so I do NOT create it in target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e07ddeaa-8cbc-0572-2a00-b0bf2dd5218a/hadoop.tmp.dir so I do NOT create it in target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2, deleteOnExit=true 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/test.cache.data in system properties and HBase conf 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.tmp.dir in system 
properties and HBase conf 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:53:03,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:53:03,367 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/yarn.nodemanager.remote-app-log-dir in system properties and 
HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:53:03,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:53:03,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:53:03,381 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:53:03,445 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:03,449 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:03,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:03,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:03,450 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:53:03,451 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:03,451 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73ee6be8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:03,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a0844a7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:03,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fe59776{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/java.io.tmpdir/jetty-localhost-44951-hadoop-hdfs-3_4_1-tests_jar-_-any-9498730067182149288/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:53:03,567 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6495f923{HTTP/1.1, (http/1.1)}{localhost:44951} 2024-11-07T12:53:03,567 INFO [Time-limited test {}] server.Server(415): Started @148836ms 2024-11-07T12:53:03,580 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:53:03,645 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:03,649 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:03,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:03,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:03,649 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:53:03,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f0827f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:03,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@109832d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:03,765 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6bfebe40{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/java.io.tmpdir/jetty-localhost-41353-hadoop-hdfs-3_4_1-tests_jar-_-any-2186834819096604937/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:03,765 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78e57521{HTTP/1.1, (http/1.1)}{localhost:41353} 2024-11-07T12:53:03,766 INFO [Time-limited test {}] server.Server(415): Started @149035ms 2024-11-07T12:53:03,767 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:53:03,797 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:03,801 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:03,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:03,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:03,801 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:53:03,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2efee71d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:03,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@650740c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:03,850 WARN [Thread-1184 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data1/current/BP-808981288-172.17.0.2-1730983983398/current, will proceed with Du for space computation calculation, 2024-11-07T12:53:03,850 WARN [Thread-1185 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data2/current/BP-808981288-172.17.0.2-1730983983398/current, will proceed with Du for space computation calculation, 2024-11-07T12:53:03,866 WARN [Thread-1163 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:53:03,869 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15f5de3590d48c03 with lease ID 0x476c1ac29f0637c8: Processing first storage report for DS-669840dc-6e29-46a5-b7f4-f34832af94cd from datanode DatanodeRegistration(127.0.0.1:35985, datanodeUuid=dff7547a-f435-4206-b7a9-252ac4d195cb, infoPort=43875, infoSecurePort=0, ipcPort=40773, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398) 2024-11-07T12:53:03,869 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15f5de3590d48c03 with lease ID 0x476c1ac29f0637c8: from storage DS-669840dc-6e29-46a5-b7f4-f34832af94cd node DatanodeRegistration(127.0.0.1:35985, datanodeUuid=dff7547a-f435-4206-b7a9-252ac4d195cb, infoPort=43875, infoSecurePort=0, ipcPort=40773, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:03,869 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15f5de3590d48c03 with lease ID 0x476c1ac29f0637c8: Processing first storage report for DS-c681a225-47d7-4f8a-b17a-3bf4cd9dfb3a from datanode DatanodeRegistration(127.0.0.1:35985, datanodeUuid=dff7547a-f435-4206-b7a9-252ac4d195cb, infoPort=43875, infoSecurePort=0, ipcPort=40773, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398) 2024-11-07T12:53:03,869 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15f5de3590d48c03 with lease ID 0x476c1ac29f0637c8: from storage DS-c681a225-47d7-4f8a-b17a-3bf4cd9dfb3a node DatanodeRegistration(127.0.0.1:35985, datanodeUuid=dff7547a-f435-4206-b7a9-252ac4d195cb, infoPort=43875, infoSecurePort=0, ipcPort=40773, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:03,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:03,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6987884{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/java.io.tmpdir/jetty-localhost-42145-hadoop-hdfs-3_4_1-tests_jar-_-any-2333137595893255485/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:03,918 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@365c2477{HTTP/1.1, (http/1.1)}{localhost:42145} 2024-11-07T12:53:03,918 INFO [Time-limited test {}] server.Server(415): Started @149187ms 2024-11-07T12:53:03,919 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:53:03,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:04,003 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data3/current/BP-808981288-172.17.0.2-1730983983398/current, will proceed with Du for space computation calculation, 2024-11-07T12:53:04,003 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data4/current/BP-808981288-172.17.0.2-1730983983398/current, will proceed with Du for space computation calculation, 2024-11-07T12:53:04,019 WARN [Thread-1199 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:53:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x21f47409677b233d with lease ID 0x476c1ac29f0637c9: Processing first storage report for DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c from datanode DatanodeRegistration(127.0.0.1:36711, datanodeUuid=71579155-7fe1-45be-8dce-23d19a212a46, infoPort=46693, infoSecurePort=0, ipcPort=38933, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398) 2024-11-07T12:53:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21f47409677b233d with lease ID 0x476c1ac29f0637c9: from storage DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c node DatanodeRegistration(127.0.0.1:36711, datanodeUuid=71579155-7fe1-45be-8dce-23d19a212a46, infoPort=46693, infoSecurePort=0, ipcPort=38933, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x21f47409677b233d with lease ID 0x476c1ac29f0637c9: Processing first storage report for DS-e097d350-4ac6-47d3-b476-f9e1fa1431c3 from datanode DatanodeRegistration(127.0.0.1:36711, datanodeUuid=71579155-7fe1-45be-8dce-23d19a212a46, infoPort=46693, infoSecurePort=0, ipcPort=38933, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398) 2024-11-07T12:53:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21f47409677b233d with lease ID 0x476c1ac29f0637c9: from storage DS-e097d350-4ac6-47d3-b476-f9e1fa1431c3 node DatanodeRegistration(127.0.0.1:36711, datanodeUuid=71579155-7fe1-45be-8dce-23d19a212a46, infoPort=46693, infoSecurePort=0, ipcPort=38933, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:04,042 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5 2024-11-07T12:53:04,045 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/zookeeper_0, clientPort=61466, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:53:04,045 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61466 2024-11-07T12:53:04,046 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:04,047 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:04,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36711 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:53:04,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35985 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:53:04,056 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e with version=8 2024-11-07T12:53:04,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/hbase-staging 2024-11-07T12:53:04,058 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:53:04,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:04,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:04,059 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:53:04,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:04,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:53:04,059 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:53:04,059 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:53:04,060 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43555 2024-11-07T12:53:04,061 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43555 connecting to ZooKeeper ensemble=127.0.0.1:61466 2024-11-07T12:53:04,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:435550x0, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:53:04,066 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43555-0x1001a4d864a0000 connected 2024-11-07T12:53:04,079 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:04,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:04,083 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:53:04,083 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e, hbase.cluster.distributed=false 2024-11-07T12:53:04,085 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:53:04,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43555 2024-11-07T12:53:04,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43555 2024-11-07T12:53:04,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43555 2024-11-07T12:53:04,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43555 2024-11-07T12:53:04,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43555 2024-11-07T12:53:04,101 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:53:04,102 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:04,102 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:04,102 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:53:04,102 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:04,102 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:53:04,102 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:53:04,102 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:53:04,103 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41419 2024-11-07T12:53:04,104 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41419 connecting to ZooKeeper ensemble=127.0.0.1:61466 2024-11-07T12:53:04,104 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:04,106 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:04,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:414190x0, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:53:04,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:414190x0, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:53:04,110 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41419-0x1001a4d864a0001 connected 2024-11-07T12:53:04,111 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:53:04,111 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:53:04,112 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:53:04,113 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:53:04,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41419 2024-11-07T12:53:04,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41419 2024-11-07T12:53:04,113 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41419 2024-11-07T12:53:04,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41419 2024-11-07T12:53:04,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41419 2024-11-07T12:53:04,125 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db9ad1cb6cf9:43555 2024-11-07T12:53:04,126 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:04,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:53:04,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:53:04,128 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:04,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:53:04,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,130 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:53:04,130 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db9ad1cb6cf9,43555,1730983984058 from backup master directory 2024-11-07T12:53:04,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:04,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:53:04,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:53:04,132 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:53:04,132 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:04,137 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/hbase.id] with ID: 8a38e8d8-a875-49bc-b0a7-792235dd49fd 2024-11-07T12:53:04,137 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/.tmp/hbase.id 2024-11-07T12:53:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36711 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:53:04,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35985 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:53:04,143 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/.tmp/hbase.id]:[hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/hbase.id] 2024-11-07T12:53:04,155 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:04,155 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:53:04,156 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
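The entries above trace HBaseTestingUtil standing up a throwaway MiniZooKeeperCluster, a mini DFS, and an HMaster on ephemeral ports, with reduced RPC handler pools (handlerCount=3). A minimal sketch of how such a single-node test cluster is typically started from test code follows; it assumes the public HBaseTestingUtil API (startMiniCluster/shutdownMiniCluster), and the handler-count key/value is an illustrative assumption, not taken from this run's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    // Keep the RPC handler pool small for a single-JVM test run, mirroring the
    // "handlerCount=3" RpcExecutor entries above (key and value are illustrative).
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Starts a mini ZooKeeper quorum, a mini DFS cluster, one master and one
    // regionserver on random ports, much like the startup logged above.
    util.startMiniCluster();
    try {
      // ... exercise the cluster via util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}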
2024-11-07T12:53:04,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36711 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:53:04,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35985 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:53:04,165 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:53:04,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:53:04,166 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:53:04,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35985 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:53:04,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36711 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:53:04,174 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store 2024-11-07T12:53:04,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36711 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:53:04,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35985 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:53:04,182 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:04,182 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:53:04,183 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:04,183 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:04,183 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:53:04,183 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:04,183 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
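The descriptor dumped above for the master local region ('master:store' with families info/proc/rs/state) is assembled internally by MasterRegion, but the same column-family attributes can be expressed with the public builder API. Below is a hedged sketch for the 'info' family only, mirroring the logged VERSIONS => '3', IN_MEMORY => 'true', ROW_INDEX_V1 encoding, ROWCOL bloom filter and 8 KB block size; the class name and the use of TableDescriptorBuilder here are purely illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  // Rebuilds the 'info' family attributes of 'master:store' as printed in the
  // log above, using the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API.
  static TableDescriptor infoFamilyExample() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .build())
        .build();
  }
}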
2024-11-07T12:53:04,183 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730983984182Disabling compacts and flushes for region at 1730983984182Disabling writes for close at 1730983984183 (+1 ms)Writing region close event to WAL at 1730983984183Closed at 1730983984183 2024-11-07T12:53:04,184 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/.initializing 2024-11-07T12:53:04,184 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:04,186 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C43555%2C1730983984058, suffix=, logDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058, archiveDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/oldWALs, maxLogs=10 2024-11-07T12:53:04,187 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 2024-11-07T12:53:04,191 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 2024-11-07T12:53:04,192 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46693:46693),(127.0.0.1/127.0.0.1:43875:43875)] 2024-11-07T12:53:04,192 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:53:04,192 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:04,193 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,193 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:53:04,197 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:04,198 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:04,198 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,199 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:53:04,199 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:04,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:53:04,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:53:04,201 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:04,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:53:04,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:53:04,203 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:04,203 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:53:04,203 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,204 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,204 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,205 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,205 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,206 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:53:04,207 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:04,209 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:53:04,209 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692664, jitterRate=-0.11923255026340485}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:53:04,210 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730983984193Initializing all the Stores at 1730983984193Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983984194 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983984196 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983984196Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983984196Cleaning up temporary data from old regions at 1730983984205 (+9 ms)Region opened successfully at 1730983984210 (+5 ms) 2024-11-07T12:53:04,212 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:53:04,215 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6877dd73, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:53:04,216 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:53:04,216 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:53:04,216 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:53:04,217 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:53:04,217 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:53:04,217 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:53:04,217 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:53:04,219 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:53:04,220 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:53:04,222 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:53:04,222 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:53:04,223 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:53:04,224 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:53:04,224 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:53:04,225 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:53:04,226 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:53:04,227 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:53:04,229 DEBUG 
[master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:53:04,230 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:53:04,232 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:53:04,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:53:04,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:53:04,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,234 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db9ad1cb6cf9,43555,1730983984058, sessionid=0x1001a4d864a0000, setting cluster-up flag (Was=false) 2024-11-07T12:53:04,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,242 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:53:04,243 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:04,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,250 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:53:04,251 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:04,252 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:53:04,254 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:53:04,254 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:53:04,254 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:53:04,254 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db9ad1cb6cf9,43555,1730983984058 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:53:04,256 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:53:04,256 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:53:04,256 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:53:04,256 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:53:04,256 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db9ad1cb6cf9:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:53:04,256 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,256 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:53:04,256 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:53:04,257 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730984014257 2024-11-07T12:53:04,257 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:53:04,257 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:53:04,257 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:53:04,257 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:53:04,257 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:53:04,257 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:53:04,257 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,258 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:53:04,258 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:53:04,258 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:53:04,258 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:53:04,258 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:53:04,259 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:04,259 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:53:04,260 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:53:04,260 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:53:04,261 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983984261,5,FailOnTimeoutGroup] 2024-11-07T12:53:04,261 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983984261,5,FailOnTimeoutGroup] 2024-11-07T12:53:04,261 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,261 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:53:04,261 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,261 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-07T12:53:04,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35985 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:53:04,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36711 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:53:04,316 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(746): ClusterId : 8a38e8d8-a875-49bc-b0a7-792235dd49fd 2024-11-07T12:53:04,316 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:53:04,319 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:53:04,319 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:53:04,321 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:53:04,321 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38f38d47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:53:04,333 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db9ad1cb6cf9:41419 2024-11-07T12:53:04,333 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:53:04,333 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:53:04,333 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-07T12:53:04,334 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(2659): reportForDuty to master=db9ad1cb6cf9,43555,1730983984058 with port=41419, startcode=1730983984101 2024-11-07T12:53:04,334 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:53:04,336 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49591, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:53:04,336 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43555 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:04,337 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43555 {}] master.ServerManager(517): Registering regionserver=db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:04,338 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e 2024-11-07T12:53:04,338 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33619 2024-11-07T12:53:04,338 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:53:04,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:53:04,342 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] zookeeper.ZKUtil(111): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:04,342 WARN [RS:0;db9ad1cb6cf9:41419 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:53:04,342 INFO [RS:0;db9ad1cb6cf9:41419 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:53:04,342 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:04,342 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9ad1cb6cf9,41419,1730983984101] 2024-11-07T12:53:04,345 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:53:04,346 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:53:04,347 INFO [RS:0;db9ad1cb6cf9:41419 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:53:04,347 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
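Above, the regionserver picks its WAL provider (FSHLogProvider) and derives its global memstore limit from the test heap (globalMemStoreLimit=880 M, low-water mark 836 M). Both are configuration-driven; the snippet below is a hedged sketch of the corresponding keys (hbase.wal.provider, hbase.regionserver.global.memstore.size), with values chosen for illustration rather than read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalAndMemstoreConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects the classic FSHLog provider that WALFactory
    // instantiates in the log above ("asyncfs" would pick AsyncFSWAL instead).
    conf.set("hbase.wal.provider", "filesystem");
    // Fraction of the heap reserved for all memstores; the 880 MB figure above
    // is this fraction applied to the test JVM heap (0.4 is the usual default).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    System.out.println(conf.get("hbase.wal.provider"));
  }
}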
2024-11-07T12:53:04,347 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:53:04,348 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:53:04,348 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:53:04,348 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:53:04,350 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
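Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entry above is emitted when a ScheduledChore is registered with the server's ChoreService. A minimal, self-contained sketch of that pattern follows; the chore name, period and body are made up for illustration and stand in for chores like CompactionChecker or MemstoreFlusherChore.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Runs every 1000 ms, like the CompactionChecker/MemstoreFlusherChore above.
    ScheduledChore demo = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic work would go here");
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(demo);   // registration produces the "... is enabled." entries
    Thread.sleep(3_000);
    service.shutdown();
  }
}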
2024-11-07T12:53:04,350 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,350 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,350 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,350 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,350 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41419,1730983984101-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:53:04,365 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:53:04,365 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41419,1730983984101-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,365 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,365 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.Replication(171): db9ad1cb6cf9,41419,1730983984101 started 2024-11-07T12:53:04,380 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:04,380 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(1482): Serving as db9ad1cb6cf9,41419,1730983984101, RpcServer on db9ad1cb6cf9/172.17.0.2:41419, sessionid=0x1001a4d864a0001 2024-11-07T12:53:04,380 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:53:04,380 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:04,380 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,41419,1730983984101' 2024-11-07T12:53:04,381 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:53:04,381 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:53:04,382 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:53:04,382 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:53:04,382 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:04,382 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,41419,1730983984101' 2024-11-07T12:53:04,382 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:53:04,382 DEBUG 
[RS:0;db9ad1cb6cf9:41419 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:53:04,383 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:53:04,383 INFO [RS:0;db9ad1cb6cf9:41419 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:53:04,383 INFO [RS:0;db9ad1cb6cf9:41419 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:53:04,485 INFO [RS:0;db9ad1cb6cf9:41419 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C41419%2C1730983984101, suffix=, logDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101, archiveDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/oldWALs, maxLogs=32 2024-11-07T12:53:04,486 INFO [RS:0;db9ad1cb6cf9:41419 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 2024-11-07T12:53:04,491 INFO [RS:0;db9ad1cb6cf9:41419 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 2024-11-07T12:53:04,492 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46693:46693),(127.0.0.1/127.0.0.1:43875:43875)] 2024-11-07T12:53:04,669 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:53:04,670 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e 2024-11-07T12:53:04,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35985 is added to blk_1073741833_1009 (size=32) 2024-11-07T12:53:04,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36711 is added to blk_1073741833_1009 (size=32) 2024-11-07T12:53:04,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:04,679 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:53:04,680 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:53:04,680 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:04,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:04,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:53:04,682 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:53:04,682 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:04,683 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:04,683 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:53:04,684 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:53:04,684 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:04,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:04,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:53:04,686 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:53:04,686 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:04,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:04,686 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:53:04,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740 2024-11-07T12:53:04,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740 2024-11-07T12:53:04,688 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:53:04,688 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:53:04,689 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:53:04,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:53:04,692 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:53:04,692 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702738, jitterRate=-0.10642305016517639}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:53:04,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730983984678Initializing all the Stores at 1730983984679 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983984679Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983984679Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983984679Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983984679Cleaning up temporary data from old regions at 1730983984688 (+9 ms)Region opened successfully at 1730983984692 (+4 ms) 2024-11-07T12:53:04,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:53:04,693 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-07T12:53:04,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:53:04,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:53:04,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:53:04,693 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:53:04,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730983984693Disabling compacts and flushes for region at 1730983984693Disabling writes for close at 1730983984693Writing region close event to WAL at 1730983984693Closed at 1730983984693 2024-11-07T12:53:04,694 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:53:04,694 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:53:04,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:53:04,696 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:53:04,697 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:53:04,847 DEBUG [db9ad1cb6cf9:43555 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:53:04,848 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:04,849 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,41419,1730983984101, state=OPENING 2024-11-07T12:53:04,851 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:53:04,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:04,853 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:53:04,853 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:53:04,853 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:53:04,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,41419,1730983984101}] 2024-11-07T12:53:04,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:04,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:05,005 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:53:05,007 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42511, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:53:05,011 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:53:05,011 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:53:05,012 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C41419%2C1730983984101.meta, suffix=.meta, logDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101, archiveDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/oldWALs, maxLogs=32 2024-11-07T12:53:05,013 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta 2024-11-07T12:53:05,018 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta 2024-11-07T12:53:05,019 DEBUG 
[RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43875:43875),(127.0.0.1/127.0.0.1:46693:46693)] 2024-11-07T12:53:05,019 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:53:05,020 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:53:05,020 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:53:05,020 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-07T12:53:05,020 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:53:05,020 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:05,020 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:53:05,020 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:53:05,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:53:05,022 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:53:05,022 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:05,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:05,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:53:05,024 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:53:05,024 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:05,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:05,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:53:05,025 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:53:05,025 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:05,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:05,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:53:05,026 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:53:05,026 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:05,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:05,026 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:53:05,027 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740 2024-11-07T12:53:05,028 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740 2024-11-07T12:53:05,029 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:53:05,029 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:53:05,030 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
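The CompactionConfiguration entries above print the effective compaction tuning for each column family of hbase:meta (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5). As far as I know these correspond to the standard hbase-site.xml keys shown below; this is a minimal sketch of setting them explicitly on a client-side Configuration, not code taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      // Sketch only: the defaults printed in the log were not overridden by this test.
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter
        return conf;
      }
    }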
2024-11-07T12:53:05,031 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:53:05,032 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690388, jitterRate=-0.122126504778862}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:53:05,032 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:53:05,033 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730983985020Writing region info on filesystem at 1730983985020Initializing all the Stores at 1730983985021 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983985021Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983985021Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983985021Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730983985021Cleaning up temporary data from old regions at 1730983985029 (+8 ms)Running coprocessor post-open hooks at 1730983985032 (+3 ms)Region opened successfully at 1730983985032 2024-11-07T12:53:05,033 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730983985005 2024-11-07T12:53:05,036 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:53:05,036 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:53:05,037 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:05,038 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,41419,1730983984101, state=OPEN 2024-11-07T12:53:05,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:53:05,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:53:05,041 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:05,041 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:53:05,041 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:53:05,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:53:05,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,41419,1730983984101 in 188 msec 2024-11-07T12:53:05,047 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:53:05,047 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 350 msec 2024-11-07T12:53:05,047 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:53:05,048 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:53:05,049 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:53:05,049 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,41419,1730983984101, seqNum=-1] 2024-11-07T12:53:05,049 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:53:05,050 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39637, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:53:05,055 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 801 msec 2024-11-07T12:53:05,056 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730983985055, completionTime=-1 2024-11-07T12:53:05,056 INFO 
[master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:53:05,056 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-07T12:53:05,057 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-07T12:53:05,057 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730984045057 2024-11-07T12:53:05,057 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730984105057 2024-11-07T12:53:05,057 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-07T12:53:05,058 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43555,1730983984058-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:05,058 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43555,1730983984058-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:05,058 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43555,1730983984058-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:05,058 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db9ad1cb6cf9:43555, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:05,058 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:05,058 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:05,060 DEBUG [master/db9ad1cb6cf9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:53:05,061 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.929sec 2024-11-07T12:53:05,061 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:53:05,061 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:53:05,061 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:53:05,062 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
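The ChoreService(168) entries above (ClusterStatusChore, BalancerChore, CatalogJanitor, and, earlier, MemstoreFlusherChore on the region server) are all periodic tasks registered through the same mechanism. A minimal sketch of defining and scheduling one, assuming the public ScheduledChore/ChoreService API and a hypothetical chore name; the period unit defaults to milliseconds, matching the "unit=MILLISECONDS" entries in the log:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Hypothetical chore that fires every 1000 ms, analogous to MemstoreFlusherChore above.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore fired");
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);   // ChoreService logs "... is enabled." at INFO, as seen above
        Thread.sleep(3000);
        service.shutdown();
      }
    }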
2024-11-07T12:53:05,062 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:53:05,062 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43555,1730983984058-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:53:05,062 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43555,1730983984058-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:53:05,064 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:53:05,064 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:53:05,064 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,43555,1730983984058-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:05,116 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d10ba6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:53:05,116 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db9ad1cb6cf9,43555,-1 for getting cluster id 2024-11-07T12:53:05,116 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:53:05,118 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8a38e8d8-a875-49bc-b0a7-792235dd49fd' 2024-11-07T12:53:05,118 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:53:05,118 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8a38e8d8-a875-49bc-b0a7-792235dd49fd" 2024-11-07T12:53:05,119 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@707b4c03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:53:05,119 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db9ad1cb6cf9,43555,-1] 2024-11-07T12:53:05,119 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:53:05,119 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:53:05,121 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34534, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:53:05,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c83f30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:53:05,122 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:53:05,122 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,41419,1730983984101, seqNum=-1] 2024-11-07T12:53:05,123 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:53:05,124 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48002, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:53:05,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:05,126 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:05,128 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:53:05,128 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-07T12:53:05,128 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-07T12:53:05,129 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:53:05,129 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:05,129 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@354823d8 2024-11-07T12:53:05,130 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:53:05,131 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34538, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:53:05,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43555 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:53:05,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
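The two TableDescriptorChecker warnings above show that this test runs with a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and WAL rolls happen quickly during testLogRollOnPipelineRestart. A sketch of how a test would typically wire that up, assuming the HBaseTestingUtil helper named in the log; only the two config values are taken from the log, the rest is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        // Values taken from the TableDescriptorChecker warnings above; deliberately small.
        conf.setLong("hbase.hregion.max.filesize", 786432L);        // 768 KB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);   // 8 KB
        util.startMiniCluster();   // ends with "Minicluster is up; activeMaster=..." as logged above
        try {
          // ... test body would go here ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }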
2024-11-07T12:53:05,132 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43555 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:53:05,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43555 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-07T12:53:05,135 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:53:05,135 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:05,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43555 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-07T12:53:05,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43555 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:53:05,136 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:53:05,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36711 is added to blk_1073741835_1011 (size=395) 2024-11-07T12:53:05,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35985 is added to blk_1073741835_1011 (size=395) 2024-11-07T12:53:05,144 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => dcb9d6867ca72c316156aad22b675e8f, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e 2024-11-07T12:53:05,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35985 is added to blk_1073741836_1012 (size=78) 2024-11-07T12:53:05,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36711 is added to blk_1073741836_1012 (size=78) 2024-11-07T12:53:05,151 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:05,151 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing dcb9d6867ca72c316156aad22b675e8f, disabling compactions & flushes 2024-11-07T12:53:05,151 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:05,151 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:05,151 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. after waiting 0 ms 2024-11-07T12:53:05,151 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:05,152 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:05,152 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for dcb9d6867ca72c316156aad22b675e8f: Waiting for close lock at 1730983985151Disabling compacts and flushes for region at 1730983985151Disabling writes for close at 1730983985151Writing region close event to WAL at 1730983985151Closed at 1730983985152 (+1 ms) 2024-11-07T12:53:05,153 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:53:05,153 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1730983985153"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730983985153"}]},"ts":"1730983985153"} 2024-11-07T12:53:05,155 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
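The create request logged at 12:53:05,132 carries a single 'info' family with VERSIONS => '1', BLOOMFILTER => 'ROW' and BLOCKSIZE => 65536, and the CreateTableProcedure above then writes the FS layout and adds the region to hbase:meta. A client-side sketch of issuing an equivalent createTable call with the public Admin API; the attribute values mirror the descriptor in the log, the connection setup is illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
          admin.createTable(TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                  // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                  .setBlocksize(65536)                // BLOCKSIZE => 64 KB
                  .build())
              .build());
        }
      }
    }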
2024-11-07T12:53:05,156 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:53:05,156 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730983985156"}]},"ts":"1730983985156"} 2024-11-07T12:53:05,158 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-07T12:53:05,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dcb9d6867ca72c316156aad22b675e8f, ASSIGN}] 2024-11-07T12:53:05,159 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dcb9d6867ca72c316156aad22b675e8f, ASSIGN 2024-11-07T12:53:05,160 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dcb9d6867ca72c316156aad22b675e8f, ASSIGN; state=OFFLINE, location=db9ad1cb6cf9,41419,1730983984101; forceNewPlan=false, retain=false 2024-11-07T12:53:05,311 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dcb9d6867ca72c316156aad22b675e8f, regionState=OPENING, regionLocation=db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:05,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dcb9d6867ca72c316156aad22b675e8f, ASSIGN because future has completed 2024-11-07T12:53:05,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dcb9d6867ca72c316156aad22b675e8f, server=db9ad1cb6cf9,41419,1730983984101}] 2024-11-07T12:53:05,470 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 
2024-11-07T12:53:05,471 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => dcb9d6867ca72c316156aad22b675e8f, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:53:05,471 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,471 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:05,471 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,471 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,472 INFO [StoreOpener-dcb9d6867ca72c316156aad22b675e8f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,474 INFO [StoreOpener-dcb9d6867ca72c316156aad22b675e8f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dcb9d6867ca72c316156aad22b675e8f columnFamilyName info 2024-11-07T12:53:05,474 DEBUG [StoreOpener-dcb9d6867ca72c316156aad22b675e8f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:05,474 INFO [StoreOpener-dcb9d6867ca72c316156aad22b675e8f-1 {}] regionserver.HStore(327): Store=dcb9d6867ca72c316156aad22b675e8f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:53:05,474 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,475 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/default/TestLogRolling-testLogRollOnPipelineRestart/dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,475 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/default/TestLogRolling-testLogRollOnPipelineRestart/dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,476 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,476 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,477 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,480 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/default/TestLogRolling-testLogRollOnPipelineRestart/dcb9d6867ca72c316156aad22b675e8f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:53:05,480 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened dcb9d6867ca72c316156aad22b675e8f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737706, jitterRate=-0.06195898354053497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:53:05,480 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:05,481 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for dcb9d6867ca72c316156aad22b675e8f: Running coprocessor pre-open hook at 1730983985471Writing region info on filesystem at 1730983985471Initializing all the Stores at 1730983985472 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730983985472Cleaning up temporary data from old regions at 1730983985476 (+4 ms)Running coprocessor post-open hooks at 1730983985480 (+4 ms)Region opened successfully at 1730983985481 (+1 ms) 2024-11-07T12:53:05,482 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f., pid=6, masterSystemTime=1730983985467 2024-11-07T12:53:05,484 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:05,484 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:05,485 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dcb9d6867ca72c316156aad22b675e8f, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:05,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dcb9d6867ca72c316156aad22b675e8f, server=db9ad1cb6cf9,41419,1730983984101 because future has completed 2024-11-07T12:53:05,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:53:05,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure dcb9d6867ca72c316156aad22b675e8f, server=db9ad1cb6cf9,41419,1730983984101 in 174 msec 2024-11-07T12:53:05,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:53:05,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=dcb9d6867ca72c316156aad22b675e8f, ASSIGN in 333 msec 2024-11-07T12:53:05,495 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:53:05,495 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730983985495"}]},"ts":"1730983985495"} 2024-11-07T12:53:05,497 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-07T12:53:05,498 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:53:05,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 366 msec 2024-11-07T12:53:05,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:05,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:06,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:06,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:07,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:07,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:08,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:08,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:09,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:09,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:09,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T12:53:09,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-07T12:53:10,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-07T12:53:10,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-07T12:53:10,001 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:53:10,001 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-07T12:53:10,523 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:53:10,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,547 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:10,551 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T12:53:10,551 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-07T12:53:10,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:10,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:11,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:11,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:12,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:12,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:13,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:13,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:14,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:14,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:15,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43555 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:53:15,243 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-07T12:53:15,243 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-07T12:53:15,246 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-07T12:53:15,246 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:15,249 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f., hostname=db9ad1cb6cf9,41419,1730983984101, seqNum=2] 2024-11-07T12:53:15,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:15,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:16,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:16,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:17,252 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 2024-11-07T12:53:17,252 WARN [ResponseProcessor for block BP-808981288-172.17.0.2-1730983983398:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-808981288-172.17.0.2-1730983983398:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:17,252 WARN [ResponseProcessor for block BP-808981288-172.17.0.2-1730983983398:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-808981288-172.17.0.2-1730983983398:blk_1073741832_1008 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
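Every WARN trace in the block above follows the same shape: RecoverLeaseFSUtils keeps probing whether the old WAL file has been closed by invoking DistributedFileSystem#isFileClosed through reflection, the probe lands on a DFSClient that has already been shut down, and the failure surfaces as "InvocationTargetException: null" with the real cause ("Filesystem closed") buried in the Caused by block, roughly once per second. The sketch below reproduces that probe pattern in isolation; it is an illustrative stand-in, not the actual RecoverLeaseFSUtils code, and the class and method names in it (IsFileClosedProbe, waitUntilClosed) are invented for this example.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: probes FileSystem#isFileClosed via reflection, the same call
// chain shown in the traces above (Method.invoke -> DistributedFileSystem.isFileClosed).
public final class IsFileClosedProbe {

  private IsFileClosedProbe() {
  }

  /** Returns true once the file is reported closed, false if every probe fails. */
  public static boolean waitUntilClosed(FileSystem fs, Path path, int attempts)
      throws InterruptedException {
    Method isFileClosed;
    try {
      // Looked up reflectively so the code also works against FileSystem
      // implementations that do not expose isFileClosed(Path).
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS client, nothing to probe
    }
    for (int i = 0; i < attempts; i++) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, path)) {
          return true;
        }
      } catch (InvocationTargetException e) {
        // e.getCause() is the real failure, e.g. IOException: Filesystem closed,
        // which is what each WARN line above reports once per retry.
        System.err.println("isFileClosed probe failed: " + e.getCause());
      } catch (IllegalAccessException e) {
        return false;
      }
      Thread.sleep(1000L); // the retries in the log are roughly one second apart
    }
    return false;
  }
}
```

Unwrapping getCause() is what turns the opaque "InvocationTargetException: null" headline into the "Filesystem closed" error that each Caused by section actually carries.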
2024-11-07T12:53:17,252 WARN [ResponseProcessor for block BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:36711,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:17,253 WARN [DataStreamer for file /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 block BP-808981288-172.17.0.2-1730983983398:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-808981288-172.17.0.2-1730983983398:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36711,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK], DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36711,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]) is bad. 2024-11-07T12:53:17,253 WARN [DataStreamer for file /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 block BP-808981288-172.17.0.2-1730983983398:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-808981288-172.17.0.2-1730983983398:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36711,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK], DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36711,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]) is bad. 2024-11-07T12:53:17,253 WARN [DataStreamer for file /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta block BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK], DatanodeInfoWithStorage[127.0.0.1:36711,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36711,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]) is bad. 2024-11-07T12:53:17,253 WARN [PacketResponder: BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36711] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:17,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-694984594_22 at /127.0.0.1:38256 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38256 dst: /127.0.0.1:36711 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:53:17,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976829203_22 at /127.0.0.1:38276 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:36711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38276 dst: /127.0.0.1:36711 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:17,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-694984594_22 at /127.0.0.1:56258 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56258 dst: /127.0.0.1:35985 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:17,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976829203_22 at /127.0.0.1:56282 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56282 dst: /127.0.0.1:35985 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:17,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976829203_22 at /127.0.0.1:38296 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38296 dst: /127.0.0.1:36711 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:17,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976829203_22 at /127.0.0.1:56270 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:35985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56270 dst: /127.0.0.1:35985 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
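The "Error Recovery for ... in pipeline [...]: datanode 0 ... is bad" messages a few lines earlier and the datanode-side ClosedChannelException and "Premature EOF from inputStream" traces above are two views of the same event: each open WAL block lost a member of its two-datanode write pipeline. Whether the DFSClient then tries to add a replacement datanode or keeps writing to the survivor is controlled by the dfs.client.block.write.replace-datanode-on-failure.* client settings. The snippet below only shows where those knobs live; the values this mini-cluster actually uses are not visible in the log, so the ones chosen here are placeholders.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Illustrative HDFS client configuration for small write pipelines. The property
// names are standard HDFS client keys; the values are placeholders, not the ones
// used by the test cluster in this log.
public final class PipelineRecoveryConf {

  private PipelineRecoveryConf() {
  }

  public static FileSystem newFs() throws Exception {
    Configuration conf = new Configuration();
    // Whether the client may swap in a replacement datanode when a pipeline member fails.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // NEVER keeps writing to the surviving datanodes instead of demanding a replacement,
    // a common choice for two- or three-node mini clusters such as the one in this log.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    // localhost:33619 is the NameNode RPC endpoint that appears throughout the log above.
    return FileSystem.get(URI.create("hdfs://localhost:33619"), conf);
  }
}
```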
2024-11-07T12:53:17,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6987884{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:17,257 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@365c2477{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:17,257 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:17,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@650740c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:17,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2efee71d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:17,258 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:53:17,259 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-07T12:53:17,259 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:53:17,259 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-808981288-172.17.0.2-1730983983398 (Datanode Uuid 71579155-7fe1-45be-8dce-23d19a212a46) service to localhost/127.0.0.1:33619 2024-11-07T12:53:17,259 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data3/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:17,259 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data4/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:17,260 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:53:17,272 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:17,276 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:17,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:17,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:17,276 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:53:17,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a4031cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:17,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c23b3ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:17,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51dac856{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/java.io.tmpdir/jetty-localhost-42195-hadoop-hdfs-3_4_1-tests_jar-_-any-592632640325142293/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:17,392 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@67a2c057{HTTP/1.1, (http/1.1)}{localhost:42195} 2024-11-07T12:53:17,392 INFO [Time-limited test {}] server.Server(415): Started @162661ms 2024-11-07T12:53:17,394 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:53:17,413 WARN [ResponseProcessor for block BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:17,413 WARN [ResponseProcessor for block BP-808981288-172.17.0.2-1730983983398:blk_1073741832_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-808981288-172.17.0.2-1730983983398:blk_1073741832_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:17,413 WARN [ResponseProcessor for block BP-808981288-172.17.0.2-1730983983398:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-808981288-172.17.0.2-1730983983398:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:17,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976829203_22 at /127.0.0.1:45458 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:35985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45458 dst: /127.0.0.1:35985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:53:17,414 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976829203_22 at /127.0.0.1:45446 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45446 dst: /127.0.0.1:35985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:17,414 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-694984594_22 at /127.0.0.1:45436 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45436 dst: /127.0.0.1:35985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:17,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6bfebe40{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:17,417 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78e57521{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:17,418 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:17,418 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@109832d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:17,418 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f0827f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:17,419 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:53:17,419 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
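The Jetty "Stopped ..." and "Started ..." lines, together with the "Ending block pool service" warnings here and a few lines earlier, mark the restart phase of the test: both datanodes are taken down while the WAL writers still hold the old pipeline, then brought back, after which the block reports below show them re-registering on new ports (127.0.0.1:37787 and 127.0.0.1:41937 instead of 36711 and 35985). A bare-bones way to drive that kind of restart against a MiniDFSCluster is sketched below, under the assumption that the plain MiniDFSCluster API is acceptable; the test in this log goes through HBase's own testing utility, which is not shown here.

```java
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

// Illustrative restart sequence for a two-datanode MiniDFSCluster, mirroring the
// shutdown/startup phase in the log: both datanodes stop, which breaks the open
// WAL pipelines, and then come back and re-register with the NameNode.
public final class PipelineRestart {

  private PipelineRestart() {
  }

  public static void bounceDataNodes(MiniDFSCluster cluster) throws Exception {
    // stopDataNode(int) removes the datanode from the cluster's internal list,
    // so index 0 is used twice to stop both nodes.
    DataNodeProperties first = cluster.stopDataNode(0);
    DataNodeProperties second = cluster.stopDataNode(0);

    // Restart from the saved properties; fresh ephemeral ports are picked, which
    // matches the new DataNode addresses reported in the block reports below.
    cluster.restartDataNode(first);
    cluster.restartDataNode(second);
    cluster.waitActive();
  }
}
```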
2024-11-07T12:53:17,419 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-808981288-172.17.0.2-1730983983398 (Datanode Uuid dff7547a-f435-4206-b7a9-252ac4d195cb) service to localhost/127.0.0.1:33619 2024-11-07T12:53:17,419 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:53:17,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data1/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:17,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data2/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:17,420 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:53:17,429 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:17,432 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:17,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:17,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:17,433 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:53:17,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@358d2587{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:17,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b68c165{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:17,480 WARN [Thread-1334 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:53:17,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfdbe435057c542de with lease ID 0x476c1ac29f0637ca: from storage DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c node DatanodeRegistration(127.0.0.1:37787, datanodeUuid=71579155-7fe1-45be-8dce-23d19a212a46, infoPort=39701, infoSecurePort=0, ipcPort=34643, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:17,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfdbe435057c542de with lease ID 0x476c1ac29f0637ca: from storage DS-e097d350-4ac6-47d3-b476-f9e1fa1431c3 node DatanodeRegistration(127.0.0.1:37787, datanodeUuid=71579155-7fe1-45be-8dce-23d19a212a46, infoPort=39701, infoSecurePort=0, ipcPort=34643, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:17,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ebcf00b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/java.io.tmpdir/jetty-localhost-43761-hadoop-hdfs-3_4_1-tests_jar-_-any-1236682935688752922/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:17,549 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27265229{HTTP/1.1, (http/1.1)}{localhost:43761} 2024-11-07T12:53:17,549 INFO [Time-limited test {}] server.Server(415): Started @162818ms 2024-11-07T12:53:17,550 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:53:17,625 WARN [Thread-1365 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:53:17,628 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x938e7e098c0a1b8 with lease ID 0x476c1ac29f0637cb: from storage DS-669840dc-6e29-46a5-b7f4-f34832af94cd node DatanodeRegistration(127.0.0.1:41937, datanodeUuid=dff7547a-f435-4206-b7a9-252ac4d195cb, infoPort=43243, infoSecurePort=0, ipcPort=35295, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:17,628 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x938e7e098c0a1b8 with lease ID 0x476c1ac29f0637cb: from storage DS-c681a225-47d7-4f8a-b17a-3bf4cd9dfb3a node DatanodeRegistration(127.0.0.1:41937, datanodeUuid=dff7547a-f435-4206-b7a9-252ac4d195cb, infoPort=43243, infoSecurePort=0, ipcPort=35295, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:17,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:17,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:18,567 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-07T12:53:18,570 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-07T12:53:18,571 ERROR [FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e-prefix:db9ad1cb6cf9,41419,1730983984101 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
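The repeated "java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" entries above come from a reflective call to isFileClosed made after the underlying DFS client has already been shut down. The following is a minimal illustrative sketch of that reflective probe pattern, not the actual RecoverLeaseFSUtils code; the class and helper names are hypothetical.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: probe FileSystem#isFileClosed reflectively. When the DFSClient
// behind the FileSystem is already closed, the call surfaces as an
// InvocationTargetException whose cause is "Filesystem closed", which is
// the WARN pattern repeated throughout this log.
public final class IsFileClosedProbe {

  // Hypothetical helper; returns false whenever the probe cannot answer.
  static boolean probeFileClosed(FileSystem fs, Path path) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // this FileSystem implementation has no usable isFileClosed
    } catch (InvocationTargetException e) {
      // e.getCause() carries the real failure, e.g. IOException: Filesystem closed
      return false;
    }
  }

  private IsFileClosedProbe() {
  }
}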
2024-11-07T12:53:18,571 WARN [FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e-prefix:db9ad1cb6cf9,41419,1730983984101 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:18,571 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C41419%2C1730983984101:(num 1730983984485) roll requested 2024-11-07T12:53:18,572 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 2024-11-07T12:53:18,577 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 newFile=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 2024-11-07T12:53:18,577 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:18,578 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:18,578 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:18,578 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:18,578 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:18,578 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 2024-11-07T12:53:18,578 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:53:18,579 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:18,579 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 2024-11-07T12:53:18,579 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43243:43243),(127.0.0.1/127.0.0.1:39701:39701)] 2024-11-07T12:53:18,579 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 is not closed yet, will try archiving it next time 2024-11-07T12:53:18,579 WARN [IPC Server handler 2 on default port 33619 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741832_1015 2024-11-07T12:53:18,579 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 after 0ms 2024-11-07T12:53:18,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:18,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:19,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:19,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:20,582 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-07T12:53:20,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:20,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:21,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:21,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:22,482 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741832_1015: GenerationStamp not matched, existing replica is blk_1073741832_1008 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-07T12:53:22,580 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 after 4001ms 2024-11-07T12:53:22,585 WARN [ResponseProcessor for block BP-808981288-172.17.0.2-1730983983398:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-808981288-172.17.0.2-1730983983398:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:22,585 WARN [DataStreamer for file /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 block BP-808981288-172.17.0.2-1730983983398:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-808981288-172.17.0.2-1730983983398:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41937,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37787,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41937,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]) is bad. 
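The lease-recovery entries above show attempt=0 failing while the NameNode reports "Lease recovery is in progress" and attempt=1 succeeding about four seconds later. Below is a minimal sketch of that retry shape using the public DistributedFileSystem#recoverLease API; it is not the actual RecoverLeaseFSUtils implementation, and the helper name, attempt count, and pause length are assumptions chosen to mirror the timings in the log.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: retry lease recovery until the NameNode reports the file closed.
public final class LeaseRecoveryRetry {

  // Hypothetical helper; interval and attempt count are illustrative only.
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path wal,
      int maxAttempts, long pauseMs) throws IOException, InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // recoverLease returns true once the file is closed and the lease is released
      if (dfs.recoverLease(wal)) {
        return true;
      }
      Thread.sleep(pauseMs); // the log above shows success after roughly 4000 ms
    }
    return false;
  }

  private LeaseRecoveryRetry() {
  }
}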
2024-11-07T12:53:22,585 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976829203_22 at /127.0.0.1:51482 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41937:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51482 dst: /127.0.0.1:41937 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:22,586 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976829203_22 at /127.0.0.1:49472 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37787:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49472 dst: /127.0.0.1:37787 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:22,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ebcf00b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:22,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27265229{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:22,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:22,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b68c165{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:22,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@358d2587{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:22,589 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:53:22,589 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-808981288-172.17.0.2-1730983983398 (Datanode Uuid dff7547a-f435-4206-b7a9-252ac4d195cb) service to localhost/127.0.0.1:33619 2024-11-07T12:53:22,589 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:53:22,589 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:53:22,590 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data1/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:22,590 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data2/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:22,590 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:53:22,597 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:22,601 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:22,601 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:22,601 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:22,601 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:53:22,602 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca8564b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:22,602 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d151a18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:22,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@528106eb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/java.io.tmpdir/jetty-localhost-43221-hadoop-hdfs-3_4_1-tests_jar-_-any-17571144387414543021/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:22,745 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c83d523{HTTP/1.1, (http/1.1)}{localhost:43221} 2024-11-07T12:53:22,745 INFO [Time-limited test {}] server.Server(415): Started @168014ms 2024-11-07T12:53:22,746 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
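The surrounding entries (Jetty datanode web app stopped and restarted, block pool service ending, later "Data Nodes restarted") come from the test bouncing its in-process DataNodes. A minimal, hypothetical sketch of that kind of restart with Hadoop's MiniDFSCluster test harness is shown below, assuming an already-started cluster handle named `cluster`; this is not TestLogRolling's actual code.

import org.apache.hadoop.hdfs.MiniDFSCluster;

// Sketch: bounce one in-process DataNode while keeping its ports and data
// directories, so existing blocks survive the restart.
public final class DataNodeBounce {

  static void bounceDataNode(MiniDFSCluster cluster, int index) throws Exception {
    var stopped = cluster.stopDataNode(index);   // take DataNode `index` down
    cluster.restartDataNode(stopped, true);      // true: reuse the same ports
    cluster.waitActive();                        // block until it re-registers
  }

  private DataNodeBounce() {
  }
}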
2024-11-07T12:53:22,765 WARN [ResponseProcessor for block BP-808981288-172.17.0.2-1730983983398:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-808981288-172.17.0.2-1730983983398:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:22,765 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-976829203_22 at /127.0.0.1:49480 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37787:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49480 dst: /127.0.0.1:37787 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:53:22,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51dac856{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:22,769 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@67a2c057{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:22,769 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:22,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c23b3ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:22,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a4031cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:22,771 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:53:22,771 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-808981288-172.17.0.2-1730983983398 (Datanode Uuid 71579155-7fe1-45be-8dce-23d19a212a46) service to localhost/127.0.0.1:33619 2024-11-07T12:53:22,771 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-07T12:53:22,771 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:53:22,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data3/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:22,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data4/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:22,772 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:53:22,787 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:22,790 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:22,790 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:22,790 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:22,790 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:53:22,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@528eeea6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:22,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2392cae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:22,833 WARN [Thread-1408 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:53:22,835 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f908ad627f4803f with lease ID 0x476c1ac29f0637cc: from storage DS-669840dc-6e29-46a5-b7f4-f34832af94cd node DatanodeRegistration(127.0.0.1:36197, datanodeUuid=dff7547a-f435-4206-b7a9-252ac4d195cb, infoPort=45357, infoSecurePort=0, ipcPort=39459, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:22,835 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f908ad627f4803f with lease ID 0x476c1ac29f0637cc: from storage DS-c681a225-47d7-4f8a-b17a-3bf4cd9dfb3a node DatanodeRegistration(127.0.0.1:36197, datanodeUuid=dff7547a-f435-4206-b7a9-252ac4d195cb, infoPort=45357, infoSecurePort=0, ipcPort=39459, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:22,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@298f7cca{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/java.io.tmpdir/jetty-localhost-40925-hadoop-hdfs-3_4_1-tests_jar-_-any-6317744488447266041/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:22,906 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42bf2aaa{HTTP/1.1, (http/1.1)}{localhost:40925} 2024-11-07T12:53:22,907 INFO [Time-limited test {}] server.Server(415): Started @168176ms 2024-11-07T12:53:22,908 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-07T12:53:22,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:22,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:22,987 WARN [Thread-1439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:53:22,989 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ad7e3c711b2fd3b with lease ID 0x476c1ac29f0637cd: from storage DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c node DatanodeRegistration(127.0.0.1:40777, datanodeUuid=71579155-7fe1-45be-8dce-23d19a212a46, infoPort=45223, infoSecurePort=0, ipcPort=39109, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:22,990 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ad7e3c711b2fd3b with lease ID 0x476c1ac29f0637cd: from storage DS-e097d350-4ac6-47d3-b476-f9e1fa1431c3 node DatanodeRegistration(127.0.0.1:40777, datanodeUuid=71579155-7fe1-45be-8dce-23d19a212a46, infoPort=45223, infoSecurePort=0, ipcPort=39109, storageInfo=lv=-57;cid=testClusterID;nsid=683899971;c=1730983983398), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:23,926 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-07T12:53:23,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:23,928 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-07T12:53:23,929 ERROR [FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e-prefix:db9ad1cb6cf9,41419,1730983984101 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37787,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:23,929 WARN [FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e-prefix:db9ad1cb6cf9,41419,1730983984101 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37787,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:23,929 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C41419%2C1730983984101:(num 1730983998571) roll requested 2024-11-07T12:53:23,930 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 2024-11-07T12:53:23,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:23,935 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 newFile=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 2024-11-07T12:53:23,935 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:23,935 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:23,935 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:23,935 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:23,935 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:23,936 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 2024-11-07T12:53:23,936 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37787,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:23,936 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37787,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:53:23,936 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 2024-11-07T12:53:23,936 WARN [IPC Server handler 3 on default port 33619 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-07T12:53:23,936 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45223:45223),(127.0.0.1/127.0.0.1:45357:45357)] 2024-11-07T12:53:23,936 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 is not closed yet, will try archiving it next time 2024-11-07T12:53:23,937 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 after 0ms 2024-11-07T12:53:24,835 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-07T12:53:24,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:24,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:25,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:25,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:25,938 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 2024-11-07T12:53:25,943 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 newFile=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 2024-11-07T12:53:25,943 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:25,944 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:25,944 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:25,944 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:25,944 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:25,944 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 2024-11-07T12:53:25,945 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45223:45223),(127.0.0.1/127.0.0.1:45357:45357)] 2024-11-07T12:53:25,945 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 is not closed yet, will try archiving it next time 2024-11-07T12:53:25,945 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 is not closed yet, will try archiving it next time 2024-11-07T12:53:25,945 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 2024-11-07T12:53:25,945 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 2024-11-07T12:53:25,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741838_1019 (size=1264) 2024-11-07T12:53:25,946 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 after 1ms 2024-11-07T12:53:25,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741838_1019 (size=1264) 2024-11-07T12:53:25,946 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 2024-11-07T12:53:25,947 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 is not closed yet, will try archiving it next time 2024-11-07T12:53:25,955 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1730983985481/Put/vlen=218/seqid=0] 2024-11-07T12:53:25,955 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1730983995250/Put/vlen=1045/seqid=0] 2024-11-07T12:53:25,955 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983984485 2024-11-07T12:53:25,955 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 2024-11-07T12:53:25,955 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 2024-11-07T12:53:25,956 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 after 1ms 2024-11-07T12:53:25,956 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 2024-11-07T12:53:25,959 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1730983998571/Put/vlen=1045/seqid=0] 2024-11-07T12:53:25,959 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1730984000583/Put/vlen=1045/seqid=0] 2024-11-07T12:53:25,959 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 2024-11-07T12:53:25,959 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 2024-11-07T12:53:25,959 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 2024-11-07T12:53:25,959 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 after 0ms 2024-11-07T12:53:25,959 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984003929 2024-11-07T12:53:25,962 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1730984003929/Put/vlen=1045/seqid=0] 2024-11-07T12:53:25,962 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 2024-11-07T12:53:25,962 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 2024-11-07T12:53:25,962 WARN [IPC Server handler 1 on default port 33619 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 has not been closed. Lease recovery is in progress. 
RecoveryId = 1022 for block blk_1073741839_1021 2024-11-07T12:53:25,962 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 after 0ms 2024-11-07T12:53:26,838 WARN [ResponseProcessor for block BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:26,837 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-694984594_22 at /127.0.0.1:56396 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56396 dst: /127.0.0.1:40777 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:40777 remote=/127.0.0.1:56396]. Total timeout mills is 60000, 59105 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:26,838 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-694984594_22 at /127.0.0.1:52680 [Receiving block BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52680 dst: /127.0.0.1:36197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:53:26,838 WARN [DataStreamer for file /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 block BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40777,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK], DatanodeInfoWithStorage[127.0.0.1:36197,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40777,DS-63fa7d12-97cd-47d9-ad9b-85dcab94cf7c,DISK]) is bad. 
2024-11-07T12:53:26,839 WARN [DataStreamer for file /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 block BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:53:26,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741839_1022 (size=85) 2024-11-07T12:53:26,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:26,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:27,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:27,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:27,937 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730983998571 after 4001ms 2024-11-07T12:53:28,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:28,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:29,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:29,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:29,963 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 after 4001ms 2024-11-07T12:53:29,963 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 2024-11-07T12:53:29,967 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 2024-11-07T12:53:29,967 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing dcb9d6867ca72c316156aad22b675e8f 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-07T12:53:29,967 ERROR [FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e-prefix:db9ad1cb6cf9,41419,1730983984101 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
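The appendAndSync failure above originates in a NameNode-side state check (checkUCBlock) and reaches the client wrapped in org.apache.hadoop.ipc.RemoteException; the frames beneath the IPC client calls are the DFSClient pipeline-recovery path reacting to it. A minimal sketch, with a hypothetical helper name, of how a caller can tell such a wrapped server error apart from a purely local I/O failure:

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public final class RemoteErrorProbe {
  private RemoteErrorProbe() {}

  /**
   * Returns the NameNode-side exception class name when the failure is a wrapped
   * server error (as in the trace above), or null for a client-side IOException.
   */
  public static String serverSideClass(IOException e) {
    return (e instanceof RemoteException) ? ((RemoteException) e).getClassName() : null;
  }
}

RemoteException also exposes unwrapRemoteException(...) when the original server-side exception type is wanted back.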
2024-11-07T12:53:29,968 WARN [FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e-prefix:db9ad1cb6cf9,41419,1730983984101 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
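The failed append above is what prompts the roll requested in the records that follow, where the test's WAL hook logs preLogRoll with the old and new WAL paths. A minimal sketch of such a hook, assuming HBase's WALActionsListener interface (the TestLogRolling$2 record below comes from an anonymous listener of this kind):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;

public class RollLoggingListener implements WALActionsListener {
  @Override
  public void preLogRoll(Path oldPath, Path newPath) throws IOException {
    // Mirrors the "preLogRoll: oldFile=... newFile=..." debug record emitted
    // just before the writer is swapped to the new WAL file.
    System.out.println("preLogRoll: oldFile=" + oldPath + " newFile=" + newPath);
  }

  @Override
  public void postLogRoll(Path oldPath, Path newPath) throws IOException {
    System.out.println("postLogRoll: newFile=" + newPath);
  }
}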
2024-11-07T12:53:29,968 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C41419%2C1730983984101:(num 1730984005938) roll requested 2024-11-07T12:53:29,968 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C41419%2C1730983984101.1730984009968 2024-11-07T12:53:29,973 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 newFile=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984009968 2024-11-07T12:53:29,973 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:29,973 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:29,974 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:29,974 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:29,974 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:29,974 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984009968 2024-11-07T12:53:29,974 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:29,974 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45357:45357),(127.0.0.1/127.0.0.1:45223:45223)] 2024-11-07T12:53:29,974 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 is not closed yet, will try archiving it next time 2024-11-07T12:53:29,974 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-808981288-172.17.0.2-1730983983398:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
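Once the close of the old writer has failed, the records that follow recover the lease on the abandoned WAL ("Recover lease on dfs file ...", then "Recovered lease, attempt=0 ... after 0ms"). HBase's RecoverLeaseFSUtils reaches isFileClosed through reflection, which is why earlier failures surface as InvocationTargetException; a minimal sketch of the same recover-then-poll idea written directly against the public DistributedFileSystem API:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class WalLeaseRecoverySketch {
  private WalLeaseRecoverySketch() {}

  /** Asks the NameNode to recover the lease on an abandoned WAL, then polls until it is closed. */
  public static void recover(FileSystem fs, Path wal) throws Exception {
    if (!(fs instanceof DistributedFileSystem)) {
      return; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    boolean done = dfs.recoverLease(wal);                     // "Recover lease on dfs file ..." (attempt=0)
    while (!done) {
      Thread.sleep(1000L);                                    // retry interval here is illustrative only
      done = dfs.isFileClosed(wal) || dfs.recoverLease(wal);  // "Recovered lease, attempt=N ... after Nms"
    }
  }
}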
2024-11-07T12:53:29,975 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 2024-11-07T12:53:29,975 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 after 0ms 2024-11-07T12:53:29,975 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 to hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/oldWALs/db9ad1cb6cf9%2C41419%2C1730983984101.1730984005938 2024-11-07T12:53:29,990 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/default/TestLogRolling-testLogRollOnPipelineRestart/dcb9d6867ca72c316156aad22b675e8f/.tmp/info/4fa01992b2d0485bbb397a8c4a86b6ad is 1080, key is row1002/info:/1730983995250/Put/seqid=0 2024-11-07T12:53:29,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741841_1024 (size=9270) 2024-11-07T12:53:29,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741841_1024 (size=9270) 2024-11-07T12:53:29,995 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/default/TestLogRolling-testLogRollOnPipelineRestart/dcb9d6867ca72c316156aad22b675e8f/.tmp/info/4fa01992b2d0485bbb397a8c4a86b6ad 2024-11-07T12:53:30,003 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/default/TestLogRolling-testLogRollOnPipelineRestart/dcb9d6867ca72c316156aad22b675e8f/.tmp/info/4fa01992b2d0485bbb397a8c4a86b6ad as hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/default/TestLogRolling-testLogRollOnPipelineRestart/dcb9d6867ca72c316156aad22b675e8f/info/4fa01992b2d0485bbb397a8c4a86b6ad 2024-11-07T12:53:30,008 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/default/TestLogRolling-testLogRollOnPipelineRestart/dcb9d6867ca72c316156aad22b675e8f/info/4fa01992b2d0485bbb397a8c4a86b6ad, entries=4, sequenceid=8, filesize=9.1 K 2024-11-07T12:53:30,009 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for dcb9d6867ca72c316156aad22b675e8f in 42ms, sequenceid=8, compaction requested=false 2024-11-07T12:53:30,009 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for dcb9d6867ca72c316156aad22b675e8f: 2024-11-07T12:53:30,010 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-07T12:53:30,010 ERROR 
[FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e-prefix:db9ad1cb6cf9,41419,1730983984101.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:30,010 WARN [FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e-prefix:db9ad1cb6cf9,41419,1730983984101.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:30,010 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C41419%2C1730983984101.meta:.meta(num 1730983985013) roll requested 2024-11-07T12:53:30,011 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730984010010.meta 2024-11-07T12:53:30,015 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,015 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,015 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,015 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,015 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,015 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730984010010.meta 2024-11-07T12:53:30,016 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:30,016 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:30,016 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta 2024-11-07T12:53:30,016 WARN [IPC Server handler 1 on default port 33619 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1014 2024-11-07T12:53:30,016 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta after 0ms 2024-11-07T12:53:30,017 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45357:45357),(127.0.0.1/127.0.0.1:45223:45223)] 2024-11-07T12:53:30,017 DEBUG [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta is not closed yet, will try archiving it next time 2024-11-07T12:53:30,040 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/.tmp/info/678c868774ea4caaa58e5d2fed738e0b is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f./info:regioninfo/1730983985485/Put/seqid=0 2024-11-07T12:53:30,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741843_1027 (size=7125) 2024-11-07T12:53:30,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741843_1027 (size=7125) 2024-11-07T12:53:30,045 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/.tmp/info/678c868774ea4caaa58e5d2fed738e0b 2024-11-07T12:53:30,064 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/.tmp/ns/49daa0e9d5ca484c94405112949f204f is 43, key is default/ns:d/1730983985051/Put/seqid=0 2024-11-07T12:53:30,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741844_1028 (size=5153) 2024-11-07T12:53:30,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741844_1028 (size=5153) 2024-11-07T12:53:30,070 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/.tmp/ns/49daa0e9d5ca484c94405112949f204f 2024-11-07T12:53:30,090 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/.tmp/table/20517641c91b4d008216578a3b0edfdc is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1730983985495/Put/seqid=0 2024-11-07T12:53:30,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741845_1029 (size=5438) 2024-11-07T12:53:30,095 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741845_1029 (size=5438) 2024-11-07T12:53:30,096 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/.tmp/table/20517641c91b4d008216578a3b0edfdc 2024-11-07T12:53:30,101 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/.tmp/info/678c868774ea4caaa58e5d2fed738e0b as hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/info/678c868774ea4caaa58e5d2fed738e0b 2024-11-07T12:53:30,106 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/info/678c868774ea4caaa58e5d2fed738e0b, entries=10, sequenceid=11, filesize=7.0 K 2024-11-07T12:53:30,107 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/.tmp/ns/49daa0e9d5ca484c94405112949f204f as hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/ns/49daa0e9d5ca484c94405112949f204f 2024-11-07T12:53:30,111 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/ns/49daa0e9d5ca484c94405112949f204f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-07T12:53:30,112 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/.tmp/table/20517641c91b4d008216578a3b0edfdc as hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/table/20517641c91b4d008216578a3b0edfdc 2024-11-07T12:53:30,117 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/table/20517641c91b4d008216578a3b0edfdc, entries=2, sequenceid=11, filesize=5.3 K 2024-11-07T12:53:30,118 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false 2024-11-07T12:53:30,118 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-07T12:53:30,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:53:30,123 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
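The flushes above follow the store's write-to-temp-then-commit pattern: each family is written out under .tmp and only then moved into the store directory ("Committing ... as ..."), so readers never see a partially written HFile. A minimal sketch of that commit step using the plain FileSystem API (paths are illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpCommitSketch {
  private TmpCommitSketch() {}

  /** Moves a fully written .tmp file into its final store location in a single rename. */
  public static void commit(Configuration conf, Path tmpFile, Path storeFile) throws IOException {
    FileSystem fs = storeFile.getFileSystem(conf);
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + storeFile);
    }
  }
}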
2024-11-07T12:53:30,123 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:53:30,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:53:30,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:53:30,123 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-07T12:53:30,124 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:53:30,124 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2026866656, stopped=false 2024-11-07T12:53:30,124 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db9ad1cb6cf9,43555,1730983984058 2024-11-07T12:53:30,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:53:30,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:30,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:53:30,125 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:53:30,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:30,126 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:53:30,126 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:53:30,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:53:30,126 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db9ad1cb6cf9,41419,1730983984101' ***** 2024-11-07T12:53:30,126 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:53:30,126 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:53:30,126 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:53:30,127 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:53:30,127 INFO [RS:0;db9ad1cb6cf9:41419 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:53:30,127 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:53:30,127 INFO [RS:0;db9ad1cb6cf9:41419 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:53:30,127 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(3091): Received CLOSE for dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:30,127 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(959): stopping server db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:30,127 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:53:30,127 INFO [RS:0;db9ad1cb6cf9:41419 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db9ad1cb6cf9:41419. 
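Cluster shutdown is broadcast through ZooKeeper: deleting /hbase/running produces the NodeDeleted events above, and each server then re-arms its watch ("Set watcher on znode that does not yet exist") before stopping. A minimal sketch of that watch using the plain ZooKeeper client rather than HBase's ZKWatcher:

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class RunningZNodeWatchSketch {
  private RunningZNodeWatchSketch() {}

  /** Arms a one-shot watch on /hbase/running; its deletion signals cluster shutdown. */
  public static void watchRunning(ZooKeeper zk, Runnable onClusterShutdown)
      throws KeeperException, InterruptedException {
    Watcher watcher = event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        onClusterShutdown.run();
      }
    };
    // exists() registers the watch whether or not the znode is currently present,
    // which is why a watch can still be set on the already-deleted node above.
    zk.exists("/hbase/running", watcher);
  }
}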
2024-11-07T12:53:30,127 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:53:30,128 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:53:30,128 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing dcb9d6867ca72c316156aad22b675e8f, disabling compactions & flushes 2024-11-07T12:53:30,128 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:30,128 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:53:30,128 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:30,128 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:53:30,128 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. after waiting 0 ms 2024-11-07T12:53:30,128 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T12:53:30,128 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 
2024-11-07T12:53:30,128 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:53:30,128 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-07T12:53:30,128 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(1325): Online Regions={dcb9d6867ca72c316156aad22b675e8f=TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f., 1588230740=hbase:meta,,1.1588230740} 2024-11-07T12:53:30,128 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, dcb9d6867ca72c316156aad22b675e8f 2024-11-07T12:53:30,128 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:53:30,128 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:53:30,128 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:53:30,128 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:53:30,129 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:53:30,133 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/default/TestLogRolling-testLogRollOnPipelineRestart/dcb9d6867ca72c316156aad22b675e8f/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-07T12:53:30,133 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-07T12:53:30,133 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 2024-11-07T12:53:30,133 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for dcb9d6867ca72c316156aad22b675e8f: Waiting for close lock at 1730984010127Running coprocessor pre-close hooks at 1730984010127Disabling compacts and flushes for region at 1730984010127Disabling writes for close at 1730984010128 (+1 ms)Writing region close event to WAL at 1730984010129 (+1 ms)Running coprocessor post-close hooks at 1730984010133 (+4 ms)Closed at 1730984010133 2024-11-07T12:53:30,133 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1730983985131.dcb9d6867ca72c316156aad22b675e8f. 
2024-11-07T12:53:30,134 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:53:30,134 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:53:30,134 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730984010128Running coprocessor pre-close hooks at 1730984010128Disabling compacts and flushes for region at 1730984010128Disabling writes for close at 1730984010128Writing region close event to WAL at 1730984010130 (+2 ms)Running coprocessor post-close hooks at 1730984010134 (+4 ms)Closed at 1730984010134 2024-11-07T12:53:30,134 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:53:30,328 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(976): stopping server db9ad1cb6cf9,41419,1730983984101; all regions closed. 2024-11-07T12:53:30,329 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,329 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,329 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,329 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,329 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:30,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741842_1025 (size=825) 2024-11-07T12:53:30,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741842_1025 (size=825) 2024-11-07T12:53:30,350 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T12:53:30,350 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T12:53:30,351 INFO [regionserver/db9ad1cb6cf9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:53:30,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:30,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:31,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:31,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:32,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:32,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:33,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:33,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:33,989 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-07T12:53:34,017 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta after 4001ms 2024-11-07T12:53:34,018 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/WALs/db9ad1cb6cf9,41419,1730983984101/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta to hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/oldWALs/db9ad1cb6cf9%2C41419%2C1730983984101.meta.1730983985013.meta 2024-11-07T12:53:34,020 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/oldWALs 2024-11-07T12:53:34,020 INFO [RS:0;db9ad1cb6cf9:41419 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C41419%2C1730983984101.meta:.meta(num 1730984010010) 2024-11-07T12:53:34,021 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,021 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,021 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,021 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,021 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741840_1023 (size=1162) 2024-11-07T12:53:34,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741840_1023 (size=1162) 2024-11-07T12:53:34,028 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/oldWALs 2024-11-07T12:53:34,028 INFO [RS:0;db9ad1cb6cf9:41419 {}] wal.AbstractFSWAL(1259): Closed 
WAL: FSHLog db9ad1cb6cf9%2C41419%2C1730983984101:(num 1730984009968) 2024-11-07T12:53:34,028 DEBUG [RS:0;db9ad1cb6cf9:41419 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:53:34,028 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:53:34,028 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:53:34,028 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.ChoreService(370): Chore service for: regionserver/db9ad1cb6cf9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-07T12:53:34,029 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:53:34,029 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-07T12:53:34,029 INFO [RS:0;db9ad1cb6cf9:41419 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41419 2024-11-07T12:53:34,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9ad1cb6cf9,41419,1730983984101 2024-11-07T12:53:34,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:53:34,031 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:53:34,033 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9ad1cb6cf9,41419,1730983984101] 2024-11-07T12:53:34,034 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db9ad1cb6cf9,41419,1730983984101 already deleted, retry=false 2024-11-07T12:53:34,034 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db9ad1cb6cf9,41419,1730983984101 expired; onlineServers=0 2024-11-07T12:53:34,034 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db9ad1cb6cf9,43555,1730983984058' ***** 2024-11-07T12:53:34,034 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:53:34,034 INFO [M:0;db9ad1cb6cf9:43555 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:53:34,034 INFO [M:0;db9ad1cb6cf9:43555 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:53:34,034 DEBUG [M:0;db9ad1cb6cf9:43555 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:53:34,035 DEBUG [M:0;db9ad1cb6cf9:43555 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:53:34,035 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:53:34,035 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983984261 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730983984261,5,FailOnTimeoutGroup] 2024-11-07T12:53:34,035 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983984261 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730983984261,5,FailOnTimeoutGroup] 2024-11-07T12:53:34,035 INFO [M:0;db9ad1cb6cf9:43555 {}] hbase.ChoreService(370): Chore service for: master/db9ad1cb6cf9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:53:34,035 INFO [M:0;db9ad1cb6cf9:43555 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:53:34,035 DEBUG [M:0;db9ad1cb6cf9:43555 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:53:34,035 INFO [M:0;db9ad1cb6cf9:43555 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:53:34,035 INFO [M:0;db9ad1cb6cf9:43555 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:53:34,035 INFO [M:0;db9ad1cb6cf9:43555 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:53:34,035 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:53:34,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:53:34,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:34,036 DEBUG [M:0;db9ad1cb6cf9:43555 {}] zookeeper.ZKUtil(347): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:53:34,036 WARN [M:0;db9ad1cb6cf9:43555 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:53:34,037 INFO [M:0;db9ad1cb6cf9:43555 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/.lastflushedseqids 2024-11-07T12:53:34,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741846_1030 (size=130) 2024-11-07T12:53:34,042 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-07T12:53:34,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741846_1030 (size=130) 2024-11-07T12:53:34,043 INFO [M:0;db9ad1cb6cf9:43555 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:53:34,043 INFO [M:0;db9ad1cb6cf9:43555 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:53:34,043 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:53:34,043 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:34,043 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:34,043 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:53:34,043 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:34,043 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-07T12:53:34,043 ERROR [FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData-prefix:db9ad1cb6cf9,43555,1730983984058 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:34,043 WARN [FSHLog-0-hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData-prefix:db9ad1cb6cf9,43555,1730983984058 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-07T12:53:34,043 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog db9ad1cb6cf9%2C43555%2C1730983984058:(num 1730983984186) roll requested 2024-11-07T12:53:34,044 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C43555%2C1730983984058.1730984014044 2024-11-07T12:53:34,048 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,048 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,048 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,048 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,048 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,049 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730984014044 2024-11-07T12:53:34,049 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:34,049 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35985,DS-669840dc-6e29-46a5-b7f4-f34832af94cd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-07T12:53:34,049 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 2024-11-07T12:53:34,050 WARN [IPC Server handler 0 on default port 33619 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-07T12:53:34,050 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 after 1ms 2024-11-07T12:53:34,053 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45357:45357),(127.0.0.1/127.0.0.1:45223:45223)] 2024-11-07T12:53:34,053 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 is not closed yet, will try archiving it next time 2024-11-07T12:53:34,068 DEBUG [M:0;db9ad1cb6cf9:43555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb90bd1a16b3426ab16a1ddf7c1ecd2f is 82, key is hbase:meta,,1/info:regioninfo/1730983985036/Put/seqid=0 2024-11-07T12:53:34,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741848_1033 (size=5672) 2024-11-07T12:53:34,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741848_1033 (size=5672) 2024-11-07T12:53:34,073 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb90bd1a16b3426ab16a1ddf7c1ecd2f 2024-11-07T12:53:34,092 DEBUG [M:0;db9ad1cb6cf9:43555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/14e91b4e63e14ea6adf2eb0fede8b335 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730983985499/Put/seqid=0 2024-11-07T12:53:34,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741849_1034 (size=6119) 2024-11-07T12:53:34,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741849_1034 (size=6119) 2024-11-07T12:53:34,097 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/14e91b4e63e14ea6adf2eb0fede8b335 2024-11-07T12:53:34,115 DEBUG [M:0;db9ad1cb6cf9:43555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3ab687c9a79446fb8456c1c1d4701558 is 69, key is db9ad1cb6cf9,41419,1730983984101/rs:state/1730983984337/Put/seqid=0 2024-11-07T12:53:34,120 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741850_1035 (size=5156) 2024-11-07T12:53:34,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741850_1035 (size=5156) 2024-11-07T12:53:34,120 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3ab687c9a79446fb8456c1c1d4701558 2024-11-07T12:53:34,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:34,133 INFO [RS:0;db9ad1cb6cf9:41419 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:53:34,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41419-0x1001a4d864a0001, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:34,133 INFO [RS:0;db9ad1cb6cf9:41419 {}] regionserver.HRegionServer(1031): Exiting; stopping=db9ad1cb6cf9,41419,1730983984101; zookeeper connection closed. 2024-11-07T12:53:34,133 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ffe23d1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ffe23d1 2024-11-07T12:53:34,134 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:53:34,138 DEBUG [M:0;db9ad1cb6cf9:43555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9af6cfe504a14dd680efb616d2782165 is 52, key is load_balancer_on/state:d/1730983985127/Put/seqid=0 2024-11-07T12:53:34,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741851_1036 (size=5056) 2024-11-07T12:53:34,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741851_1036 (size=5056) 2024-11-07T12:53:34,143 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9af6cfe504a14dd680efb616d2782165 2024-11-07T12:53:34,148 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb90bd1a16b3426ab16a1ddf7c1ecd2f as hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb90bd1a16b3426ab16a1ddf7c1ecd2f 2024-11-07T12:53:34,153 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb90bd1a16b3426ab16a1ddf7c1ecd2f, entries=8, sequenceid=56, filesize=5.5 K 2024-11-07T12:53:34,154 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/14e91b4e63e14ea6adf2eb0fede8b335 as hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/14e91b4e63e14ea6adf2eb0fede8b335 2024-11-07T12:53:34,158 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/14e91b4e63e14ea6adf2eb0fede8b335, entries=6, sequenceid=56, filesize=6.0 K 2024-11-07T12:53:34,159 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3ab687c9a79446fb8456c1c1d4701558 as hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3ab687c9a79446fb8456c1c1d4701558 2024-11-07T12:53:34,163 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3ab687c9a79446fb8456c1c1d4701558, entries=1, sequenceid=56, filesize=5.0 K 2024-11-07T12:53:34,164 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9af6cfe504a14dd680efb616d2782165 as hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9af6cfe504a14dd680efb616d2782165 2024-11-07T12:53:34,168 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9af6cfe504a14dd680efb616d2782165, entries=1, sequenceid=56, filesize=4.9 K 2024-11-07T12:53:34,169 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=56, compaction requested=false 2024-11-07T12:53:34,170 INFO [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:53:34,170 DEBUG [M:0;db9ad1cb6cf9:43555 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730984014043Disabling compacts and flushes for region at 1730984014043Disabling writes for close at 1730984014043Obtaining lock to block concurrent updates at 1730984014043Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730984014043Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1730984014043Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730984014053 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730984014054 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730984014067 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730984014068 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730984014078 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730984014091 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730984014092 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730984014102 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730984014115 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730984014115Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730984014125 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730984014138 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730984014138Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58e32c66: reopening flushed file at 1730984014148 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33148da4: reopening flushed file at 1730984014153 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ffceeae: reopening flushed file at 1730984014158 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@696a59e6: reopening flushed file at 1730984014163 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=56, compaction requested=false at 1730984014169 (+6 ms)Writing region close event to WAL at 1730984014170 (+1 ms)Closed at 1730984014170 2024-11-07T12:53:34,171 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,171 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,171 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,171 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,171 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:53:34,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40777 is added to blk_1073741847_1031 (size=757) 2024-11-07T12:53:34,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36197 is added to blk_1073741847_1031 (size=757) 2024-11-07T12:53:34,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:34,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:35,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,659 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:53:35,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:35,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:35,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:36,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:36,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:36,989 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-07T12:53:37,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:37,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:38,051 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 after 4001ms 2024-11-07T12:53:38,051 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/WALs/db9ad1cb6cf9,43555,1730983984058/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 to hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/oldWALs/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 2024-11-07T12:53:38,054 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/MasterData/oldWALs/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186 to hdfs://localhost:33619/user/jenkins/test-data/2391029f-1b7e-46a6-6a28-955ab1ca936e/oldWALs/db9ad1cb6cf9%2C43555%2C1730983984058.1730983984186$masterlocalwal$ 2024-11-07T12:53:38,054 INFO [M:0;db9ad1cb6cf9:43555 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:53:38,054 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:53:38,054 INFO [M:0;db9ad1cb6cf9:43555 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43555 2024-11-07T12:53:38,054 INFO [M:0;db9ad1cb6cf9:43555 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:53:38,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:38,156 INFO [M:0;db9ad1cb6cf9:43555 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:53:38,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43555-0x1001a4d864a0000, quorum=127.0.0.1:61466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:53:38,159 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@298f7cca{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:38,159 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42bf2aaa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:38,159 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:38,159 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2392cae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:38,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@528eeea6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:38,161 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:53:38,161 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:53:38,161 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:53:38,161 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-808981288-172.17.0.2-1730983983398 (Datanode Uuid 71579155-7fe1-45be-8dce-23d19a212a46) service to localhost/127.0.0.1:33619 2024-11-07T12:53:38,161 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data3/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:38,161 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data4/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:38,162 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:53:38,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@528106eb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:38,164 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c83d523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:38,164 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:38,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d151a18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:38,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca8564b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:38,165 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:53:38,165 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:53:38,165 WARN [BP-808981288-172.17.0.2-1730983983398 heartbeating to localhost/127.0.0.1:33619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-808981288-172.17.0.2-1730983983398 (Datanode Uuid dff7547a-f435-4206-b7a9-252ac4d195cb) service to localhost/127.0.0.1:33619 2024-11-07T12:53:38,165 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:53:38,166 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data1/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:38,166 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/cluster_5e2fca6a-f843-43a5-7646-1504c4b39dc2/data/data2/current/BP-808981288-172.17.0.2-1730983983398 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:53:38,166 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:53:38,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fe59776{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:53:38,172 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6495f923{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:53:38,172 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:53:38,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a0844a7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:53:38,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73ee6be8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir/,STOPPED} 2024-11-07T12:53:38,178 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:53:38,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:53:38,202 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:33619 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33619 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33619 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33619 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33619 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33619 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33619 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33619 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=51 (was 83), ProcessCount=11 (was 11), AvailableMemoryMB=7816 (was 7963) 2024-11-07T12:53:38,209 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=51, ProcessCount=11, AvailableMemoryMB=7816 2024-11-07T12:53:38,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:53:38,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.log.dir so I do NOT create it in target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad 2024-11-07T12:53:38,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c267c3e-2c80-d426-3fbc-ba2b402438a5/hadoop.tmp.dir so I do NOT create it in target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad 2024-11-07T12:53:38,209 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0, deleteOnExit=true 2024-11-07T12:53:38,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/test.cache.data in system properties and HBase conf 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:53:38,210 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:53:38,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:53:38,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:53:38,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:53:38,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:53:38,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:53:38,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:53:38,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:53:38,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:53:38,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:53:38,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:53:38,223 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:53:38,287 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:38,291 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:38,293 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:38,293 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:38,293 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:53:38,293 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:38,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3eec5be0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:38,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@294b1089{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:38,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4135d6d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/java.io.tmpdir/jetty-localhost-46153-hadoop-hdfs-3_4_1-tests_jar-_-any-2165146451016060566/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:53:38,407 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15cd018{HTTP/1.1, (http/1.1)}{localhost:46153} 2024-11-07T12:53:38,407 INFO [Time-limited test {}] server.Server(415): Started @183676ms 2024-11-07T12:53:38,419 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:53:38,465 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:38,469 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:38,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:38,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:38,470 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:53:38,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4114613b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:38,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67f9152{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:38,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a4b134d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/java.io.tmpdir/jetty-localhost-33491-hadoop-hdfs-3_4_1-tests_jar-_-any-1118546324155143268/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:38,585 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25c02940{HTTP/1.1, (http/1.1)}{localhost:33491} 2024-11-07T12:53:38,585 INFO [Time-limited test {}] server.Server(415): Started @183854ms 2024-11-07T12:53:38,587 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:53:38,614 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:53:38,617 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:53:38,617 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:53:38,617 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:53:38,617 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:53:38,618 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@677a249b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:53:38,618 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ec454b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:53:38,667 WARN [Thread-1634 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/data/data2/current/BP-1161455003-172.17.0.2-1730984018240/current, will proceed with Du for space computation calculation, 2024-11-07T12:53:38,667 WARN [Thread-1633 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/data/data1/current/BP-1161455003-172.17.0.2-1730984018240/current, will proceed with Du for space computation calculation, 2024-11-07T12:53:38,683 WARN [Thread-1612 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:53:38,686 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1b311151f561c0c8 with lease ID 0xd9e4cf06d160d976: Processing first storage report for DS-3b8f033f-506c-40d5-a313-f7d4fb2f30c2 from datanode DatanodeRegistration(127.0.0.1:34989, datanodeUuid=a37fa075-cdb6-4a82-8c7d-60b38cb8c89b, infoPort=39573, infoSecurePort=0, ipcPort=37959, storageInfo=lv=-57;cid=testClusterID;nsid=1198148787;c=1730984018240) 2024-11-07T12:53:38,686 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b311151f561c0c8 with lease ID 0xd9e4cf06d160d976: from storage DS-3b8f033f-506c-40d5-a313-f7d4fb2f30c2 node DatanodeRegistration(127.0.0.1:34989, datanodeUuid=a37fa075-cdb6-4a82-8c7d-60b38cb8c89b, infoPort=39573, infoSecurePort=0, ipcPort=37959, storageInfo=lv=-57;cid=testClusterID;nsid=1198148787;c=1730984018240), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:38,686 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1b311151f561c0c8 with lease ID 0xd9e4cf06d160d976: Processing first storage report for DS-728fc68f-a8ef-4093-9598-07a823f2b697 from datanode DatanodeRegistration(127.0.0.1:34989, datanodeUuid=a37fa075-cdb6-4a82-8c7d-60b38cb8c89b, infoPort=39573, infoSecurePort=0, ipcPort=37959, storageInfo=lv=-57;cid=testClusterID;nsid=1198148787;c=1730984018240) 2024-11-07T12:53:38,686 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b311151f561c0c8 with lease ID 0xd9e4cf06d160d976: from storage DS-728fc68f-a8ef-4093-9598-07a823f2b697 node DatanodeRegistration(127.0.0.1:34989, datanodeUuid=a37fa075-cdb6-4a82-8c7d-60b38cb8c89b, infoPort=39573, infoSecurePort=0, ipcPort=37959, storageInfo=lv=-57;cid=testClusterID;nsid=1198148787;c=1730984018240), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:38,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6826318a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/java.io.tmpdir/jetty-localhost-38749-hadoop-hdfs-3_4_1-tests_jar-_-any-3101511557139590318/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:53:38,736 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6df2cf02{HTTP/1.1, (http/1.1)}{localhost:38749} 2024-11-07T12:53:38,736 INFO [Time-limited test {}] server.Server(415): Started @184005ms 2024-11-07T12:53:38,738 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
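The entries above cover the standard mini-cluster bring-up for this test: the per-test data directory is created with deleteOnExit=true, DFS is started, and two DataNodes come up behind Jetty and send their first block reports. A minimal sketch of how a test typically drives this, assuming the HBaseTestingUtil / StartMiniClusterOption API named in the log (the builder method names below mirror the fields the log prints and are not taken from TestLogRolling itself):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Field names match the StartMiniClusterOption{...} line logged above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // produces the "STARTING DFS", Jetty and block-report lines
        try {
          // test body would run against util.getConnection() here
        } finally {
          util.shutdownMiniCluster();    // tears the cluster down; test dirs are deleted on exit
        }
      }
    }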
2024-11-07T12:53:38,814 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/data/data3/current/BP-1161455003-172.17.0.2-1730984018240/current, will proceed with Du for space computation calculation, 2024-11-07T12:53:38,814 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/data/data4/current/BP-1161455003-172.17.0.2-1730984018240/current, will proceed with Du for space computation calculation, 2024-11-07T12:53:38,829 WARN [Thread-1648 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:53:38,831 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x323dd058428033bd with lease ID 0xd9e4cf06d160d977: Processing first storage report for DS-e633c161-8359-4067-bdf3-5581f652aa44 from datanode DatanodeRegistration(127.0.0.1:34211, datanodeUuid=df98d698-3c30-459c-bc6d-ce38ba0b2515, infoPort=33991, infoSecurePort=0, ipcPort=40261, storageInfo=lv=-57;cid=testClusterID;nsid=1198148787;c=1730984018240) 2024-11-07T12:53:38,831 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x323dd058428033bd with lease ID 0xd9e4cf06d160d977: from storage DS-e633c161-8359-4067-bdf3-5581f652aa44 node DatanodeRegistration(127.0.0.1:34211, datanodeUuid=df98d698-3c30-459c-bc6d-ce38ba0b2515, infoPort=33991, infoSecurePort=0, ipcPort=40261, storageInfo=lv=-57;cid=testClusterID;nsid=1198148787;c=1730984018240), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:38,832 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x323dd058428033bd with lease ID 0xd9e4cf06d160d977: Processing first storage report for DS-4960c150-5a08-4378-8eb1-5227dc892f74 from datanode DatanodeRegistration(127.0.0.1:34211, datanodeUuid=df98d698-3c30-459c-bc6d-ce38ba0b2515, infoPort=33991, infoSecurePort=0, ipcPort=40261, storageInfo=lv=-57;cid=testClusterID;nsid=1198148787;c=1730984018240) 2024-11-07T12:53:38,832 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x323dd058428033bd with lease ID 0xd9e4cf06d160d977: from storage DS-4960c150-5a08-4378-8eb1-5227dc892f74 node DatanodeRegistration(127.0.0.1:34211, datanodeUuid=df98d698-3c30-459c-bc6d-ce38ba0b2515, infoPort=33991, infoSecurePort=0, ipcPort=40261, storageInfo=lv=-57;cid=testClusterID;nsid=1198148787;c=1730984018240), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:53:38,859 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad 2024-11-07T12:53:38,861 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/zookeeper_0, clientPort=58289, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:53:38,862 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58289 2024-11-07T12:53:38,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:38,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:38,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:53:38,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:53:38,873 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e with version=8 2024-11-07T12:53:38,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/hbase-staging 2024-11-07T12:53:38,874 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:53:38,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:38,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:38,875 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:53:38,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:38,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:53:38,875 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:53:38,875 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:53:38,876 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33571 2024-11-07T12:53:38,877 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33571 connecting to ZooKeeper ensemble=127.0.0.1:58289 2024-11-07T12:53:38,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:335710x0, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:53:38,883 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33571-0x1001a4e0e4b0000 connected 2024-11-07T12:53:38,895 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:38,896 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:38,898 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:53:38,898 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e, hbase.cluster.distributed=false 2024-11-07T12:53:38,899 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:53:38,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33571 2024-11-07T12:53:38,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33571 2024-11-07T12:53:38,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33571 2024-11-07T12:53:38,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33571 2024-11-07T12:53:38,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33571 2024-11-07T12:53:38,916 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:53:38,916 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:38,916 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:38,916 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:53:38,916 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:53:38,916 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:53:38,916 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:53:38,916 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:53:38,917 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33673 2024-11-07T12:53:38,918 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33673 connecting to ZooKeeper ensemble=127.0.0.1:58289 2024-11-07T12:53:38,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:38,920 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:38,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336730x0, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:53:38,924 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33673-0x1001a4e0e4b0001 connected 2024-11-07T12:53:38,924 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:53:38,924 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:53:38,925 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:53:38,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:53:38,926 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:53:38,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33673 2024-11-07T12:53:38,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33673 2024-11-07T12:53:38,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33673 2024-11-07T12:53:38,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33673 2024-11-07T12:53:38,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33673 
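The RpcExecutor lines above show both the master (port 33571) and the region server (port 33673) starting the same small set of FIFO call queues: default, priority read/write, replication and metaPriority, each backed by a LinkedBlockingQueue with a handful of handlers. A hedged sketch of the stock configuration keys that size these pools; the key names are regular HBase settings, while the values merely echo the numbers in the log and are not claimed to be this test's actual configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueConfigSketch {
      static Configuration smallHandlerPool() {
        Configuration conf = HBaseConfiguration.create();
        // Total RPC handler threads per server; the shipped default is much higher.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Fraction of queues (and handlers) devoted to reads in the priority RWQ executor.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
        // 0 disables a dedicated scan queue, matching scanQueues=0 above.
        conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0f);
        return conf;
      }
    }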
2024-11-07T12:53:38,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:38,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:38,943 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db9ad1cb6cf9:33571 2024-11-07T12:53:38,943 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:53:38,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:53:38,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:53:38,954 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:53:38,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:53:38,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:38,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:38,974 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:53:38,975 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db9ad1cb6cf9,33571,1730984018874 from backup master directory 2024-11-07T12:53:38,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, 
quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:53:38,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:53:38,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:53:38,995 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:53:38,995 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:53:38,999 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/hbase.id] with ID: 7d7a2652-94b1-4cd8-8227-1292ed24979c 2024-11-07T12:53:38,999 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/.tmp/hbase.id 2024-11-07T12:53:39,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:53:39,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:53:39,007 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/.tmp/hbase.id]:[hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/hbase.id] 2024-11-07T12:53:39,017 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:39,017 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:53:39,018 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
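The two Close-WAL-Writer-0 warnings above come from the WAL close path reflectively probing isFileClosed during lease recovery against an HDFS client that has already been shut down; the paths point at the previous test's cluster on hdfs://localhost:37737, not the one just started on 39771, so the "Filesystem closed" cause reads as teardown noise rather than a failure of this test. A sketch of the call the stack trace is making; the class and method names come from the trace itself, while the exact overload and the null progress reporter are assumptions:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;

    public class WalLeaseRecoverySketch {
      static void recover(FileSystem fs, Path wal, Configuration conf) throws IOException {
        // Loops over DistributedFileSystem#recoverLease / #isFileClosed until the WAL
        // file is closed; if the underlying DFSClient is already closed it logs the
        // "Failed invocation" warnings seen above instead of succeeding.
        RecoverLeaseFSUtils.recoverFileLease(fs, wal, conf,
            null /* CancelableProgressable reporter, assumed optional here */);
      }
    }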
2024-11-07T12:53:39,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:53:39,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:53:39,030 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:53:39,031 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:53:39,031 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:53:39,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:53:39,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:53:39,042 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store 2024-11-07T12:53:39,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:53:39,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:53:39,049 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:39,049 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:53:39,049 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:39,049 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:39,049 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:53:39,049 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:53:39,049 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
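The MasterRegion / HRegion(7590) entries above print the full schema of the local 'master:store' region that backs the master's procedure store: an in-memory 'info' family with three versions, 8 KB blocks, ROW_INDEX_V1 encoding and ROWCOL blooms, plus 'proc', 'rs' and 'state' families on stock settings. For orientation, a hedged sketch of an equivalent descriptor expressed with the public client API; the real descriptor is assembled internally by the master region code, and the table name below is a placeholder rather than the internal 'master:store' region:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreSchemaSketch {
      static TableDescriptor masterStoreLike() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("default", "master_store_sketch"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .build())
            // proc, rs and state keep the stock 1-version, 64 KB-block settings shown in the log.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }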
2024-11-07T12:53:39,049 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730984019049Disabling compacts and flushes for region at 1730984019049Disabling writes for close at 1730984019049Writing region close event to WAL at 1730984019049Closed at 1730984019049 2024-11-07T12:53:39,050 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/.initializing 2024-11-07T12:53:39,050 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/WALs/db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:53:39,052 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C33571%2C1730984018874, suffix=, logDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/WALs/db9ad1cb6cf9,33571,1730984018874, archiveDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/oldWALs, maxLogs=10 2024-11-07T12:53:39,052 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C33571%2C1730984018874.1730984019052 2024-11-07T12:53:39,057 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/WALs/db9ad1cb6cf9,33571,1730984018874/db9ad1cb6cf9%2C33571%2C1730984018874.1730984019052 2024-11-07T12:53:39,059 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39573:39573),(127.0.0.1/127.0.0.1:33991:33991)] 2024-11-07T12:53:39,060 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:53:39,060 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:39,061 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,061 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:53:39,063 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:39,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:53:39,064 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:53:39,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:53:39,066 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:53:39,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:53:39,067 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:53:39,068 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,068 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,069 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,070 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,070 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,071 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:53:39,073 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:53:39,076 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:53:39,076 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717189, jitterRate=-0.0880470722913742}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:53:39,077 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730984019061Initializing all the Stores at 1730984019061Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984019061Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984019062 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984019062Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984019062Cleaning up temporary data from old regions at 1730984019070 (+8 ms)Region opened successfully at 1730984019077 (+7 ms) 2024-11-07T12:53:39,077 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:53:39,080 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a07e8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:53:39,080 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:53:39,081 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:53:39,081 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:53:39,081 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:53:39,081 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:53:39,081 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:53:39,081 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:53:39,085 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:53:39,086 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:53:39,087 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:53:39,087 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:53:39,088 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:53:39,089 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:53:39,089 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:53:39,093 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:53:39,094 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:53:39,095 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:53:39,096 DEBUG 
[master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:53:39,097 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:53:39,099 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:53:39,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:53:39,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:53:39,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,101 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db9ad1cb6cf9,33571,1730984018874, sessionid=0x1001a4e0e4b0000, setting cluster-up flag (Was=false) 2024-11-07T12:53:39,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,108 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:53:39,109 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:53:39,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,117 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:53:39,117 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:53:39,118 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:53:39,120 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:53:39,120 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:53:39,120 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:53:39,121 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db9ad1cb6cf9,33571,1730984018874 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:53:39,122 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:53:39,122 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:53:39,122 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:53:39,122 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:53:39,122 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db9ad1cb6cf9:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:53:39,122 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,122 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:53:39,122 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:53:39,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730984049123 2024-11-07T12:53:39,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:53:39,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:53:39,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:53:39,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:53:39,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:53:39,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:53:39,123 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,124 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:53:39,124 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:53:39,124 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:53:39,124 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:53:39,124 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:53:39,124 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:53:39,124 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:53:39,124 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730984019124,5,FailOnTimeoutGroup] 2024-11-07T12:53:39,124 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730984019124,5,FailOnTimeoutGroup] 2024-11-07T12:53:39,124 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,124 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:53:39,125 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,125 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,125 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,125 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:53:39,129 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(746): ClusterId : 7d7a2652-94b1-4cd8-8227-1292ed24979c 2024-11-07T12:53:39,129 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:53:39,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:53:39,132 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:53:39,132 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:53:39,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:53:39,133 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:53:39,133 INFO [PEWorker-1 {}] 
regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e 2024-11-07T12:53:39,135 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:53:39,136 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54baa8db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:53:39,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:53:39,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:53:39,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:39,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:53:39,142 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:53:39,142 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:39,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:53:39,144 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:53:39,144 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:39,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:53:39,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:53:39,145 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:39,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:53:39,147 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:53:39,147 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:39,148 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db9ad1cb6cf9:33673 2024-11-07T12:53:39,148 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:53:39,148 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:53:39,148 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-07T12:53:39,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:53:39,148 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(2659): reportForDuty to master=db9ad1cb6cf9,33571,1730984018874 with port=33673, startcode=1730984018915 2024-11-07T12:53:39,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740 2024-11-07T12:53:39,149 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:53:39,149 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740 2024-11-07T12:53:39,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:53:39,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:53:39,151 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39103, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:53:39,151 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:53:39,152 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33571 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,152 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33571 {}] master.ServerManager(517): Registering regionserver=db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,152 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:53:39,153 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e 2024-11-07T12:53:39,153 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39771 2024-11-07T12:53:39,153 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:53:39,154 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:53:39,155 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740196, jitterRate=-0.0587923526763916}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:53:39,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730984019140Initializing all the Stores at 1730984019141 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984019141Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984019141Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984019141Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984019141Cleaning up temporary data from old regions at 1730984019151 (+10 ms)Region opened successfully at 1730984019155 (+4 ms) 2024-11-07T12:53:39,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:53:39,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:53:39,155 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:53:39,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:53:39,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:53:39,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:53:39,156 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] zookeeper.ZKUtil(111): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,156 WARN [RS:0;db9ad1cb6cf9:33673 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T12:53:39,156 INFO [RS:0;db9ad1cb6cf9:33673 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:53:39,156 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:53:39,156 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730984019155Disabling compacts and flushes for region at 1730984019155Disabling writes for close at 1730984019155Writing region close event to WAL at 1730984019156 (+1 ms)Closed at 1730984019156 2024-11-07T12:53:39,156 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9ad1cb6cf9,33673,1730984018915] 2024-11-07T12:53:39,157 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:53:39,157 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:53:39,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:53:39,158 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:53:39,159 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:53:39,160 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:53:39,161 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:53:39,162 INFO [RS:0;db9ad1cb6cf9:33673 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:53:39,162 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,162 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:53:39,162 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:53:39,162 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:53:39,163 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:53:39,167 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,167 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,167 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,167 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-07T12:53:39,167 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,167 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33673,1730984018915-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:53:39,181 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:53:39,182 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33673,1730984018915-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,182 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,182 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.Replication(171): db9ad1cb6cf9,33673,1730984018915 started 2024-11-07T12:53:39,195 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,196 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1482): Serving as db9ad1cb6cf9,33673,1730984018915, RpcServer on db9ad1cb6cf9/172.17.0.2:33673, sessionid=0x1001a4e0e4b0001 2024-11-07T12:53:39,196 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:53:39,196 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,196 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,33673,1730984018915' 2024-11-07T12:53:39,196 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:53:39,196 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:53:39,197 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:53:39,197 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:53:39,197 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,197 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,33673,1730984018915' 2024-11-07T12:53:39,197 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:53:39,197 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:53:39,197 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:53:39,198 INFO [RS:0;db9ad1cb6cf9:33673 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:53:39,198 INFO [RS:0;db9ad1cb6cf9:33673 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-07T12:53:39,300 INFO [RS:0;db9ad1cb6cf9:33673 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C33673%2C1730984018915, suffix=, logDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915, archiveDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/oldWALs, maxLogs=32 2024-11-07T12:53:39,300 INFO [RS:0;db9ad1cb6cf9:33673 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C33673%2C1730984018915.1730984019300 2024-11-07T12:53:39,305 INFO [RS:0;db9ad1cb6cf9:33673 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984019300 2024-11-07T12:53:39,306 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39573:39573),(127.0.0.1/127.0.0.1:33991:33991)] 2024-11-07T12:53:39,310 DEBUG [db9ad1cb6cf9:33571 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:53:39,310 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,312 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,33673,1730984018915, state=OPENING 2024-11-07T12:53:39,314 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:53:39,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:53:39,316 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:53:39,316 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,33673,1730984018915}] 2024-11-07T12:53:39,316 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:53:39,316 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:53:39,469 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:53:39,471 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49799, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:53:39,474 INFO 
[RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:53:39,475 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:53:39,476 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C33673%2C1730984018915.meta, suffix=.meta, logDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915, archiveDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/oldWALs, maxLogs=32 2024-11-07T12:53:39,477 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C33673%2C1730984018915.meta.1730984019477.meta 2024-11-07T12:53:39,482 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.meta.1730984019477.meta 2024-11-07T12:53:39,484 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33991:33991),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-07T12:53:39,489 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:53:39,489 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:53:39,489 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:53:39,489 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-07T12:53:39,489 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:53:39,489 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:39,489 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:53:39,489 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:53:39,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:53:39,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:53:39,491 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:39,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:53:39,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:53:39,493 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:39,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:53:39,494 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:53:39,494 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:53:39,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:53:39,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:53:39,495 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-07T12:53:39,495 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:53:39,496 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740 2024-11-07T12:53:39,497 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740 2024-11-07T12:53:39,498 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:53:39,498 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:53:39,498 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:53:39,499 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:53:39,500 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771557, jitterRate=-0.01891551911830902}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:53:39,500 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:53:39,501 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730984019490Writing region info on filesystem at 1730984019490Initializing all the Stores at 1730984019490Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984019490Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984019490Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984019490Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984019490Cleaning up temporary data from old regions at 1730984019498 (+8 ms)Running coprocessor post-open hooks at 1730984019500 (+2 ms)Region opened successfully at 1730984019501 (+1 ms) 2024-11-07T12:53:39,502 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730984019469 2024-11-07T12:53:39,504 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:53:39,504 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:53:39,505 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,506 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,33673,1730984018915, state=OPEN 2024-11-07T12:53:39,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:53:39,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:53:39,510 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:53:39,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:53:39,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:53:39,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,33673,1730984018915 in 194 msec 2024-11-07T12:53:39,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:53:39,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 356 msec 2024-11-07T12:53:39,517 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:53:39,517 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:53:39,518 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:53:39,518 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,33673,1730984018915, seqNum=-1] 2024-11-07T12:53:39,519 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:53:39,520 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54975, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:53:39,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 404 msec 2024-11-07T12:53:39,526 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730984019525, completionTime=-1 2024-11-07T12:53:39,526 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:53:39,526 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-07T12:53:39,527 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-07T12:53:39,527 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730984079527 2024-11-07T12:53:39,528 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730984139527 2024-11-07T12:53:39,528 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-07T12:53:39,528 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33571,1730984018874-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,528 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33571,1730984018874-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,528 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33571,1730984018874-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,528 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db9ad1cb6cf9:33571, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:53:39,528 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,528 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:53:39,529 DEBUG [master/db9ad1cb6cf9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:53:39,531 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.536sec 2024-11-07T12:53:39,531 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:53:39,531 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:53:39,531 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:53:39,531 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:53:39,531 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:53:39,531 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33571,1730984018874-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:53:39,532 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33571,1730984018874-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:53:39,534 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:53:39,534 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:53:39,534 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,33571,1730984018874-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:53:39,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2526c219, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:53:39,629 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db9ad1cb6cf9,33571,-1 for getting cluster id 2024-11-07T12:53:39,629 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:53:39,631 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7d7a2652-94b1-4cd8-8227-1292ed24979c' 2024-11-07T12:53:39,632 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:53:39,632 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7d7a2652-94b1-4cd8-8227-1292ed24979c" 2024-11-07T12:53:39,632 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@236b3c80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:53:39,632 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db9ad1cb6cf9,33571,-1] 2024-11-07T12:53:39,632 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:53:39,633 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:53:39,634 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36148, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:53:39,635 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75e789f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:53:39,635 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:53:39,636 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,33673,1730984018915, seqNum=-1] 2024-11-07T12:53:39,637 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:53:39,638 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:53:39,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:53:39,640 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:53:39,642 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:53:39,643 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:53:39,643 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:53:39,643 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@860441f 2024-11-07T12:53:39,643 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:53:39,644 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36162, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:53:39,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:53:39,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-07T12:53:39,645 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:53:39,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:53:39,648 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:53:39,648 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,648 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-07T12:53:39,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:53:39,649 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:53:39,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741835_1011 (size=405) 2024-11-07T12:53:39,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741835_1011 (size=405) 2024-11-07T12:53:39,658 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4648fa9b6ec8d73c9292a664d829ba2c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e 2024-11-07T12:53:39,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741836_1012 (size=88) 2024-11-07T12:53:39,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741836_1012 (size=88) 2024-11-07T12:53:39,665 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:39,665 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 4648fa9b6ec8d73c9292a664d829ba2c, disabling compactions & flushes 2024-11-07T12:53:39,665 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:53:39,665 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:53:39,665 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. after waiting 0 ms 2024-11-07T12:53:39,665 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 
2024-11-07T12:53:39,665 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:53:39,665 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4648fa9b6ec8d73c9292a664d829ba2c: Waiting for close lock at 1730984019665Disabling compacts and flushes for region at 1730984019665Disabling writes for close at 1730984019665Writing region close event to WAL at 1730984019665Closed at 1730984019665 2024-11-07T12:53:39,666 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:53:39,667 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1730984019667"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730984019667"}]},"ts":"1730984019667"} 2024-11-07T12:53:39,669 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-07T12:53:39,670 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:53:39,670 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730984019670"}]},"ts":"1730984019670"} 2024-11-07T12:53:39,672 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-07T12:53:39,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4648fa9b6ec8d73c9292a664d829ba2c, ASSIGN}] 2024-11-07T12:53:39,674 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4648fa9b6ec8d73c9292a664d829ba2c, ASSIGN 2024-11-07T12:53:39,675 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4648fa9b6ec8d73c9292a664d829ba2c, ASSIGN; state=OFFLINE, location=db9ad1cb6cf9,33673,1730984018915; forceNewPlan=false, retain=false 2024-11-07T12:53:39,825 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4648fa9b6ec8d73c9292a664d829ba2c, regionState=OPENING, regionLocation=db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:39,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4648fa9b6ec8d73c9292a664d829ba2c, ASSIGN because future has completed 2024-11-07T12:53:39,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4648fa9b6ec8d73c9292a664d829ba2c, server=db9ad1cb6cf9,33673,1730984018915}] 2024-11-07T12:53:39,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:39,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:39,985 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 
2024-11-07T12:53:39,985 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4648fa9b6ec8d73c9292a664d829ba2c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:53:39,985 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,985 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:53:39,985 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,985 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,987 INFO [StoreOpener-4648fa9b6ec8d73c9292a664d829ba2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,988 INFO [StoreOpener-4648fa9b6ec8d73c9292a664d829ba2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4648fa9b6ec8d73c9292a664d829ba2c columnFamilyName info 2024-11-07T12:53:39,988 DEBUG [StoreOpener-4648fa9b6ec8d73c9292a664d829ba2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:53:39,989 INFO [StoreOpener-4648fa9b6ec8d73c9292a664d829ba2c-1 {}] regionserver.HStore(327): Store=4648fa9b6ec8d73c9292a664d829ba2c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:53:39,989 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,990 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,990 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,990 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,990 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,992 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,994 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:53:39,994 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4648fa9b6ec8d73c9292a664d829ba2c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760292, jitterRate=-0.03323882818222046}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:53:39,995 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:53:39,995 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4648fa9b6ec8d73c9292a664d829ba2c: Running coprocessor pre-open hook at 1730984019986Writing region info on filesystem at 1730984019986Initializing all the Stores at 1730984019986Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984019986Cleaning up temporary data from old regions at 1730984019990 (+4 ms)Running coprocessor post-open hooks at 1730984019995 (+5 ms)Region opened successfully at 1730984019995 2024-11-07T12:53:39,997 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c., pid=6, masterSystemTime=1730984019981 2024-11-07T12:53:39,999 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:53:39,999 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:53:39,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:53:39,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-07T12:53:40,000 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4648fa9b6ec8d73c9292a664d829ba2c, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:53:40,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-07T12:53:40,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4648fa9b6ec8d73c9292a664d829ba2c, server=db9ad1cb6cf9,33673,1730984018915 because future has completed 2024-11-07T12:53:40,007 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:53:40,007 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4648fa9b6ec8d73c9292a664d829ba2c, server=db9ad1cb6cf9,33673,1730984018915 in 176 msec 2024-11-07T12:53:40,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:53:40,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4648fa9b6ec8d73c9292a664d829ba2c, ASSIGN in 335 msec 2024-11-07T12:53:40,010 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:53:40,010 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730984020010"}]},"ts":"1730984020010"} 2024-11-07T12:53:40,012 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-07T12:53:40,013 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:53:40,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, 
hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 368 msec 2024-11-07T12:53:40,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:40,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:41,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:41,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:42,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:42,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:43,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:43,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:44,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:44,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:44,991 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:53:44,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:44,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:44,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:44,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:44,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:44,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,016 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:53:45,159 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T12:53:45,159 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-07T12:53:45,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:45,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:46,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:46,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:47,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:47,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:48,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:48,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:49,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:53:49,663 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-07T12:53:49,663 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-07T12:53:49,666 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:53:49,666 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:53:49,669 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c., hostname=db9ad1cb6cf9,33673,1730984018915, seqNum=2] 2024-11-07T12:53:49,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:53:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:53:49,682 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-07T12:53:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-07T12:53:49,683 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T12:53:49,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T12:53:49,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33673 {}] 
regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-07T12:53:49,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:53:49,845 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4648fa9b6ec8d73c9292a664d829ba2c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-07T12:53:49,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/3b856eeba3ec4e0cb92be093457cf15e is 1080, key is row0001/info:/1730984029670/Put/seqid=0 2024-11-07T12:53:49,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741837_1013 (size=6033) 2024-11-07T12:53:49,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741837_1013 (size=6033) 2024-11-07T12:53:49,874 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/3b856eeba3ec4e0cb92be093457cf15e 2024-11-07T12:53:49,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/3b856eeba3ec4e0cb92be093457cf15e as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3b856eeba3ec4e0cb92be093457cf15e 2024-11-07T12:53:49,885 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3b856eeba3ec4e0cb92be093457cf15e, entries=1, sequenceid=5, filesize=5.9 K 2024-11-07T12:53:49,886 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4648fa9b6ec8d73c9292a664d829ba2c in 41ms, sequenceid=5, compaction requested=false 2024-11-07T12:53:49,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4648fa9b6ec8d73c9292a664d829ba2c: 2024-11-07T12:53:49,886 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:53:49,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-07T12:53:49,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-07T12:53:49,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-07T12:53:49,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-11-07T12:53:49,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 218 msec 2024-11-07T12:53:49,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:49,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:50,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:50,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:51,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:51,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:52,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:53:52,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:53:53,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:53:53,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:53:54,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:53:54,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:53:55,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:53:55,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:53:56,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:53:56,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:53:57,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:53:57,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:53:58,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:53:58,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
11 more 2024-11-07T12:53:59,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-07T12:53:59,763 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-07T12:53:59,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:53:59,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:53:59,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-07T12:53:59,769 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-07T12:53:59,773 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T12:53:59,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T12:53:59,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33673 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-07T12:53:59,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 
2024-11-07T12:53:59,927 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 4648fa9b6ec8d73c9292a664d829ba2c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-07T12:53:59,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/c5eb79fef0cc4bb8bcfda2c20a7374a3 is 1080, key is row0002/info:/1730984039765/Put/seqid=0 2024-11-07T12:53:59,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741838_1014 (size=6033) 2024-11-07T12:53:59,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741838_1014 (size=6033) 2024-11-07T12:53:59,941 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/c5eb79fef0cc4bb8bcfda2c20a7374a3 2024-11-07T12:53:59,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/c5eb79fef0cc4bb8bcfda2c20a7374a3 as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/c5eb79fef0cc4bb8bcfda2c20a7374a3 2024-11-07T12:53:59,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
2024-11-07T12:53:59,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:53:59,953 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/c5eb79fef0cc4bb8bcfda2c20a7374a3, entries=1, sequenceid=9, filesize=5.9 K
2024-11-07T12:53:59,954 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4648fa9b6ec8d73c9292a664d829ba2c in 27ms, sequenceid=9, compaction requested=false
2024-11-07T12:53:59,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 4648fa9b6ec8d73c9292a664d829ba2c:
2024-11-07T12:53:59,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.
2024-11-07T12:53:59,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-07T12:53:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-07T12:53:59,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-07T12:53:59,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec
2024-11-07T12:53:59,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec
2024-11-07T12:54:00,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:54:00,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:54:01,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:54:01,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:54:02,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:54:02,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:54:03,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:54:03,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:54:04,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
2024-11-07T12:54:04,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
2024-11-07T12:54:04,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 after 68044ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:54:04,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta after 68031ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:54:05,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:05,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:06,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:06,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:07,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:07,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:08,858 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:54:08,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:08,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:54:09,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-07T12:54:09,793 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-07T12:54:09,797 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C33673%2C1730984018915.1730984049797
2024-11-07T12:54:09,802 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:54:09,802 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:54:09,803 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:54:09,803 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:54:09,803 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:54:09,803 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984019300 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984049797
2024-11-07T12:54:09,804 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39573:39573),(127.0.0.1/127.0.0.1:33991:33991)]
2024-11-07T12:54:09,804 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984019300 is not closed yet, will try archiving it next time
2024-11-07T12:54:09,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-07T12:54:09,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741833_1009 (size=5546)
2024-11-07T12:54:09,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741833_1009 (size=5546)
2024-11-07T12:54:09,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-07T12:54:09,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-07T12:54:09,807 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-07T12:54:09,808 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-07T12:54:09,808 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-07T12:54:09,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:54:09,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:54:09,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33673 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-07T12:54:09,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.
2024-11-07T12:54:09,962 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 4648fa9b6ec8d73c9292a664d829ba2c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-07T12:54:09,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/3e618302d1c546c4be787be04ff51479 is 1080, key is row0003/info:/1730984049795/Put/seqid=0
2024-11-07T12:54:09,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741840_1016 (size=6033)
2024-11-07T12:54:09,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741840_1016 (size=6033)
2024-11-07T12:54:09,972 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/3e618302d1c546c4be787be04ff51479
2024-11-07T12:54:09,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/3e618302d1c546c4be787be04ff51479 as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3e618302d1c546c4be787be04ff51479
2024-11-07T12:54:09,982 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3e618302d1c546c4be787be04ff51479, entries=1, sequenceid=13, filesize=5.9 K
2024-11-07T12:54:09,983 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4648fa9b6ec8d73c9292a664d829ba2c in 21ms, sequenceid=13, compaction requested=true
2024-11-07T12:54:09,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 4648fa9b6ec8d73c9292a664d829ba2c:
2024-11-07T12:54:09,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.
2024-11-07T12:54:09,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-07T12:54:09,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-07T12:54:09,987 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-07T12:54:09,987 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-11-07T12:54:09,990 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec
2024-11-07T12:54:10,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-07T12:54:10,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:11,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:11,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:12,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:12,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:13,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:13,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:14,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:14,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:15,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:15,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:16,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:16,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:17,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:17,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:18,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:18,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:19,809 INFO [master/db9ad1cb6cf9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-07T12:54:19,809 INFO [master/db9ad1cb6cf9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-07T12:54:19,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-07T12:54:19,824 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-07T12:54:19,824 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:54:19,825 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:54:19,825 DEBUG [Time-limited test {}] regionserver.HStore(1541): 4648fa9b6ec8d73c9292a664d829ba2c/info is initiating minor compaction (all files) 2024-11-07T12:54:19,826 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:54:19,826 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:19,826 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 4648fa9b6ec8d73c9292a664d829ba2c/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 
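The repeated "Failed invocation for hdfs://... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" WARN entries above come from the WAL close path polling HDFS lease recovery after the mini-cluster's filesystem has already been shut down, so the same trace reappears roughly once per second per WAL file. The sketch below is illustrative only and is not the actual RecoverLeaseFSUtils code (which invokes isFileClosed() reflectively); the helper name, the DistributedFileSystem handle, the WAL path, and the timeout are assumptions for the example.

```java
// Illustrative sketch only, not HBase's RecoverLeaseFSUtils: the shape of the retry loop
// behind the repeated "Failed invocation ... Filesystem closed" WARN entries above.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path walPath, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        // recoverLease() returns true once the NameNode has closed the file;
        // isFileClosed() double-checks the current state of the WAL.
        if (dfs.recoverLease(walPath) || dfs.isFileClosed(walPath)) {
          return true;
        }
      } catch (IOException e) {
        // If the DFSClient underneath has already been shut down, both calls throw
        // IOException("Filesystem closed"); the utility logs a WARN like the ones
        // above and simply retries on the next iteration.
      }
      Thread.sleep(1000L); // roughly the one-second cadence visible in the timestamps above
    }
    return false;
  }
}
```

In this log the retries appear unable to ever succeed, because the client was closed during teardown of an earlier mini-cluster, which is why the identical trace keeps repeating for the two WAL files under hdfs://localhost:37737.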
2024-11-07T12:54:19,826 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3b856eeba3ec4e0cb92be093457cf15e, hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/c5eb79fef0cc4bb8bcfda2c20a7374a3, hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3e618302d1c546c4be787be04ff51479] into tmpdir=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp, totalSize=17.7 K 2024-11-07T12:54:19,826 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3b856eeba3ec4e0cb92be093457cf15e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1730984029670 2024-11-07T12:54:19,827 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c5eb79fef0cc4bb8bcfda2c20a7374a3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1730984039765 2024-11-07T12:54:19,827 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3e618302d1c546c4be787be04ff51479, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1730984049795 2024-11-07T12:54:19,839 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 4648fa9b6ec8d73c9292a664d829ba2c#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:54:19,840 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/c04d6b16bf014dd29be71b794a34c524 is 1080, key is row0001/info:/1730984029670/Put/seqid=0 2024-11-07T12:54:19,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741841_1017 (size=8296) 2024-11-07T12:54:19,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741841_1017 (size=8296) 2024-11-07T12:54:19,857 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/c04d6b16bf014dd29be71b794a34c524 as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/c04d6b16bf014dd29be71b794a34c524 2024-11-07T12:54:19,865 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4648fa9b6ec8d73c9292a664d829ba2c/info of 4648fa9b6ec8d73c9292a664d829ba2c into c04d6b16bf014dd29be71b794a34c524(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:54:19,865 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 4648fa9b6ec8d73c9292a664d829ba2c: 2024-11-07T12:54:19,869 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C33673%2C1730984018915.1730984059869 2024-11-07T12:54:19,882 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:19,882 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:19,882 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:19,882 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:19,882 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:19,882 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984049797 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984059869 2024-11-07T12:54:19,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741839_1015 (size=2520) 2024-11-07T12:54:19,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741839_1015 (size=2520) 2024-11-07T12:54:19,887 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33991:33991),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-07T12:54:19,888 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984019300 to hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/oldWALs/db9ad1cb6cf9%2C33673%2C1730984018915.1730984019300 2024-11-07T12:54:19,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:54:19,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:54:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-07T12:54:19,892 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-07T12:54:19,893 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-07T12:54:19,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-07T12:54:19,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:19,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:20,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33673 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-07T12:54:20,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 
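The pid=13 FlushTableProcedure and its pid=14 FlushRegionProcedure above were started by a client flush request ("Client=jenkins ... flush TestLogRolling-testCompactionRecordDoesntBlockRolling"). A minimal sketch of issuing such a flush through the public Admin API follows; it is not the test's actual code, and the configuration and connection setup are assumptions for the example.

```java
// Minimal sketch, not the test's code: issuing the table flush that the
// FlushTableProcedure (pid=13) / FlushRegionProcedure (pid=14) entries above record.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The master runs this as a FlushTableProcedure with one FlushRegionProcedure
      // per region, which is the pid=13 -> pid=14 chain logged above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}
```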
2024-11-07T12:54:20,047 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 4648fa9b6ec8d73c9292a664d829ba2c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-07T12:54:20,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/971bfb93fa594c7e89b32d4883f8b01f is 1080, key is row0000/info:/1730984059867/Put/seqid=0 2024-11-07T12:54:20,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741843_1019 (size=6033) 2024-11-07T12:54:20,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741843_1019 (size=6033) 2024-11-07T12:54:20,062 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/971bfb93fa594c7e89b32d4883f8b01f 2024-11-07T12:54:20,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/971bfb93fa594c7e89b32d4883f8b01f as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/971bfb93fa594c7e89b32d4883f8b01f 2024-11-07T12:54:20,080 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/971bfb93fa594c7e89b32d4883f8b01f, entries=1, sequenceid=18, filesize=5.9 K 2024-11-07T12:54:20,082 INFO [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4648fa9b6ec8d73c9292a664d829ba2c in 35ms, sequenceid=18, compaction requested=false 2024-11-07T12:54:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 4648fa9b6ec8d73c9292a664d829ba2c: 2024-11-07T12:54:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 
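Both the compacted HFile (c04d6b16bf014dd29be71b794a34c524) and the freshly flushed one (971bfb93fa594c7e89b32d4883f8b01f) are first written under the region's .tmp directory and only then committed into the store, as the "HRegionFileSystem(442): Committing ... as ..." entries above show. The sketch below illustrates that write-to-.tmp-then-commit-by-rename pattern; the FileSystem handle and concrete paths are assumptions, and this is not the HBase implementation itself.

```java
// Minimal sketch of the ".tmp then commit by rename" pattern seen in the
// "HRegionFileSystem(442): Committing ... as ..." entries above.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path committed = new Path(storeDir, tmpFile.getName());
    // A rename within the same filesystem is cheap, and readers never observe a
    // partially written HFile under the live store directory.
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + committed);
    }
    return committed;
  }
}
```

Keeping in-flight output under .tmp also means an aborted flush or compaction leaves nothing behind in the store directory that a scanner could pick up.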
2024-11-07T12:54:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-07T12:54:20,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-07T12:54:20,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-07T12:54:20,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 191 msec 2024-11-07T12:54:20,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 199 msec 2024-11-07T12:54:20,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:20,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:21,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:21,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:22,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:22,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:23,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:23,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:24,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:24,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:24,985 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4648fa9b6ec8d73c9292a664d829ba2c, had cached 0 bytes from a total of 14329 2024-11-07T12:54:25,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:25,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:26,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:26,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:27,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:27,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:28,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:28,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:29,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33571 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-07T12:54:29,934 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-07T12:54:29,937 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C33673%2C1730984018915.1730984069937 2024-11-07T12:54:29,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:29,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:30,003 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,004 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,004 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,004 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,004 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,004 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984059869 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984069937 2024-11-07T12:54:30,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741842_1018 (size=2026) 2024-11-07T12:54:30,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741842_1018 (size=2026) 2024-11-07T12:54:30,008 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/WALs/db9ad1cb6cf9,33673,1730984018915/db9ad1cb6cf9%2C33673%2C1730984018915.1730984049797 to hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/oldWALs/db9ad1cb6cf9%2C33673%2C1730984018915.1730984049797 2024-11-07T12:54:30,029 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39573:39573),(127.0.0.1/127.0.0.1:33991:33991)] 2024-11-07T12:54:30,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:54:30,029 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-07T12:54:30,030 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:54:30,030 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:54:30,030 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:54:30,030 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-07T12:54:30,030 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:54:30,030 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=931215768, stopped=false 2024-11-07T12:54:30,030 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db9ad1cb6cf9,33571,1730984018874 2024-11-07T12:54:30,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:54:30,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:54:30,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:30,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:30,034 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:54:30,034 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:54:30,034 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:54:30,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:54:30,034 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db9ad1cb6cf9,33673,1730984018915' ***** 2024-11-07T12:54:30,034 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:54:30,035 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:54:30,035 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:54:30,036 INFO [RS:0;db9ad1cb6cf9:33673 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:54:30,036 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:54:30,036 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:54:30,036 INFO [RS:0;db9ad1cb6cf9:33673 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:54:30,036 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(3091): Received CLOSE for 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:54:30,045 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(959): stopping server db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:54:30,045 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:54:30,045 INFO [RS:0;db9ad1cb6cf9:33673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db9ad1cb6cf9:33673. 
2024-11-07T12:54:30,045 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:54:30,045 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:54:30,045 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:54:30,045 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:54:30,045 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-07T12:54:30,045 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:54:30,054 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-07T12:54:30,054 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1325): Online Regions={4648fa9b6ec8d73c9292a664d829ba2c=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c., 1588230740=hbase:meta,,1.1588230740} 2024-11-07T12:54:30,054 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4648fa9b6ec8d73c9292a664d829ba2c 2024-11-07T12:54:30,055 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:54:30,055 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:54:30,056 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:54:30,056 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:54:30,056 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:54:30,056 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-07T12:54:30,057 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4648fa9b6ec8d73c9292a664d829ba2c, disabling compactions & flushes 2024-11-07T12:54:30,057 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:54:30,057 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:54:30,057 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. after waiting 0 ms 2024-11-07T12:54:30,057 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 
2024-11-07T12:54:30,057 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 4648fa9b6ec8d73c9292a664d829ba2c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-07T12:54:30,064 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/9f14203eacec4eecb1391d18b0d20a72 is 1080, key is row0001/info:/1730984069935/Put/seqid=0 2024-11-07T12:54:30,095 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/.tmp/info/f7d90a1f1e644dab9b5590cb9bb124bd is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c./info:regioninfo/1730984020000/Put/seqid=0 2024-11-07T12:54:30,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741845_1021 (size=6033) 2024-11-07T12:54:30,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741845_1021 (size=6033) 2024-11-07T12:54:30,110 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/9f14203eacec4eecb1391d18b0d20a72 2024-11-07T12:54:30,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741846_1022 (size=7308) 2024-11-07T12:54:30,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741846_1022 (size=7308) 2024-11-07T12:54:30,124 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/.tmp/info/f7d90a1f1e644dab9b5590cb9bb124bd 2024-11-07T12:54:30,146 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/.tmp/info/9f14203eacec4eecb1391d18b0d20a72 as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/9f14203eacec4eecb1391d18b0d20a72 2024-11-07T12:54:30,162 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/9f14203eacec4eecb1391d18b0d20a72, entries=1, sequenceid=22, filesize=5.9 K 2024-11-07T12:54:30,164 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4648fa9b6ec8d73c9292a664d829ba2c in 107ms, sequenceid=22, compaction requested=true 2024-11-07T12:54:30,169 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T12:54:30,169 INFO [regionserver/db9ad1cb6cf9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T12:54:30,174 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3b856eeba3ec4e0cb92be093457cf15e, hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/c5eb79fef0cc4bb8bcfda2c20a7374a3, hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3e618302d1c546c4be787be04ff51479] to archive 2024-11-07T12:54:30,181 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T12:54:30,184 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3b856eeba3ec4e0cb92be093457cf15e to hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3b856eeba3ec4e0cb92be093457cf15e 2024-11-07T12:54:30,185 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/c5eb79fef0cc4bb8bcfda2c20a7374a3 to hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/c5eb79fef0cc4bb8bcfda2c20a7374a3 2024-11-07T12:54:30,187 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3e618302d1c546c4be787be04ff51479 to hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/info/3e618302d1c546c4be787be04ff51479 2024-11-07T12:54:30,188 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=db9ad1cb6cf9:33571 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-07T12:54:30,188 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3b856eeba3ec4e0cb92be093457cf15e=6033, c5eb79fef0cc4bb8bcfda2c20a7374a3=6033, 3e618302d1c546c4be787be04ff51479=6033] 2024-11-07T12:54:30,201 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4648fa9b6ec8d73c9292a664d829ba2c/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-07T12:54:30,202 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 2024-11-07T12:54:30,202 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4648fa9b6ec8d73c9292a664d829ba2c: Waiting for close lock at 1730984070057Running coprocessor pre-close hooks at 1730984070057Disabling compacts and flushes for region at 1730984070057Disabling writes for close at 1730984070057Obtaining lock to block concurrent updates at 1730984070057Preparing flush snapshotting stores in 4648fa9b6ec8d73c9292a664d829ba2c at 1730984070057Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1730984070057Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. at 1730984070058 (+1 ms)Flushing 4648fa9b6ec8d73c9292a664d829ba2c/info: creating writer at 1730984070058Flushing 4648fa9b6ec8d73c9292a664d829ba2c/info: appending metadata at 1730984070063 (+5 ms)Flushing 4648fa9b6ec8d73c9292a664d829ba2c/info: closing flushed file at 1730984070063Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43b7c289: reopening flushed file at 1730984070136 (+73 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4648fa9b6ec8d73c9292a664d829ba2c in 107ms, sequenceid=22, compaction requested=true at 1730984070164 (+28 ms)Writing region close event to WAL at 1730984070194 (+30 ms)Running coprocessor post-close hooks at 1730984070202 (+8 ms)Closed at 1730984070202 2024-11-07T12:54:30,202 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1730984019645.4648fa9b6ec8d73c9292a664d829ba2c. 
2024-11-07T12:54:30,213 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/.tmp/ns/1f769e53c2d1460b95af0b6900a2433f is 43, key is default/ns:d/1730984019520/Put/seqid=0 2024-11-07T12:54:30,254 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-07T12:54:30,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741847_1023 (size=5153) 2024-11-07T12:54:30,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741847_1023 (size=5153) 2024-11-07T12:54:30,279 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/.tmp/ns/1f769e53c2d1460b95af0b6900a2433f 2024-11-07T12:54:30,319 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/.tmp/table/6b07b59db24144cdb36c26ef644269f0 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1730984020010/Put/seqid=0 2024-11-07T12:54:30,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741848_1024 (size=5508) 2024-11-07T12:54:30,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741848_1024 (size=5508) 2024-11-07T12:54:30,455 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-07T12:54:30,656 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-07T12:54:30,737 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/.tmp/table/6b07b59db24144cdb36c26ef644269f0 2024-11-07T12:54:30,747 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/.tmp/info/f7d90a1f1e644dab9b5590cb9bb124bd as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/info/f7d90a1f1e644dab9b5590cb9bb124bd 2024-11-07T12:54:30,754 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/info/f7d90a1f1e644dab9b5590cb9bb124bd, entries=10, sequenceid=11, filesize=7.1 K 2024-11-07T12:54:30,758 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/.tmp/ns/1f769e53c2d1460b95af0b6900a2433f as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/ns/1f769e53c2d1460b95af0b6900a2433f 2024-11-07T12:54:30,767 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/ns/1f769e53c2d1460b95af0b6900a2433f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-07T12:54:30,769 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/.tmp/table/6b07b59db24144cdb36c26ef644269f0 as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/table/6b07b59db24144cdb36c26ef644269f0 2024-11-07T12:54:30,776 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/table/6b07b59db24144cdb36c26ef644269f0, entries=2, sequenceid=11, filesize=5.4 K 2024-11-07T12:54:30,777 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 721ms, sequenceid=11, compaction requested=false 2024-11-07T12:54:30,785 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-07T12:54:30,785 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:54:30,786 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:54:30,786 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730984070055Running coprocessor pre-close hooks at 1730984070055Disabling compacts and flushes for region at 1730984070055Disabling writes for close at 1730984070056 (+1 ms)Obtaining lock to block concurrent updates at 1730984070056Preparing flush snapshotting stores in 1588230740 at 1730984070056Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1730984070057 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1730984070058 (+1 ms)Flushing 1588230740/info: creating writer at 1730984070058Flushing 1588230740/info: appending metadata at 1730984070094 (+36 ms)Flushing 1588230740/info: closing flushed file at 1730984070094Flushing 1588230740/ns: creating writer at 1730984070152 (+58 ms)Flushing 1588230740/ns: appending metadata at 1730984070213 (+61 ms)Flushing 1588230740/ns: closing flushed file at 
1730984070213Flushing 1588230740/table: creating writer at 1730984070292 (+79 ms)Flushing 1588230740/table: appending metadata at 1730984070318 (+26 ms)Flushing 1588230740/table: closing flushed file at 1730984070318Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5597b108: reopening flushed file at 1730984070746 (+428 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65ba85c7: reopening flushed file at 1730984070754 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7583f56a: reopening flushed file at 1730984070767 (+13 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 721ms, sequenceid=11, compaction requested=false at 1730984070777 (+10 ms)Writing region close event to WAL at 1730984070781 (+4 ms)Running coprocessor post-close hooks at 1730984070785 (+4 ms)Closed at 1730984070785 2024-11-07T12:54:30,786 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:54:30,856 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(976): stopping server db9ad1cb6cf9,33673,1730984018915; all regions closed. 2024-11-07T12:54:30,857 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,857 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,857 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,857 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,857 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:30,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741834_1010 (size=3306) 2024-11-07T12:54:30,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741834_1010 (size=3306) 2024-11-07T12:54:30,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:30,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:31,174 INFO [regionserver/db9ad1cb6cf9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:54:31,269 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/oldWALs 2024-11-07T12:54:31,269 INFO [RS:0;db9ad1cb6cf9:33673 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C33673%2C1730984018915.meta:.meta(num 1730984019477) 2024-11-07T12:54:31,293 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,295 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,295 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,295 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,296 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741844_1020 (size=1252) 2024-11-07T12:54:31,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741844_1020 (size=1252) 2024-11-07T12:54:31,303 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/oldWALs 2024-11-07T12:54:31,303 INFO [RS:0;db9ad1cb6cf9:33673 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C33673%2C1730984018915:(num 1730984069937) 2024-11-07T12:54:31,303 DEBUG [RS:0;db9ad1cb6cf9:33673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:54:31,303 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:54:31,303 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:54:31,303 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.ChoreService(370): Chore service for: regionserver/db9ad1cb6cf9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T12:54:31,304 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:54:31,304 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:54:31,305 INFO [RS:0;db9ad1cb6cf9:33673 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33673 2024-11-07T12:54:31,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:54:31,307 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:54:31,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9ad1cb6cf9,33673,1730984018915 2024-11-07T12:54:31,308 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9ad1cb6cf9,33673,1730984018915] 2024-11-07T12:54:31,311 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db9ad1cb6cf9,33673,1730984018915 already deleted, retry=false 2024-11-07T12:54:31,311 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db9ad1cb6cf9,33673,1730984018915 expired; onlineServers=0 2024-11-07T12:54:31,311 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db9ad1cb6cf9,33571,1730984018874' ***** 2024-11-07T12:54:31,311 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:54:31,311 INFO [M:0;db9ad1cb6cf9:33571 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:54:31,311 INFO [M:0;db9ad1cb6cf9:33571 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:54:31,311 DEBUG [M:0;db9ad1cb6cf9:33571 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:54:31,311 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:54:31,311 DEBUG [M:0;db9ad1cb6cf9:33571 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:54:31,311 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730984019124 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730984019124,5,FailOnTimeoutGroup] 2024-11-07T12:54:31,311 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730984019124 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730984019124,5,FailOnTimeoutGroup] 2024-11-07T12:54:31,312 INFO [M:0;db9ad1cb6cf9:33571 {}] hbase.ChoreService(370): Chore service for: master/db9ad1cb6cf9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:54:31,312 INFO [M:0;db9ad1cb6cf9:33571 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:54:31,312 DEBUG [M:0;db9ad1cb6cf9:33571 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:54:31,312 INFO [M:0;db9ad1cb6cf9:33571 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:54:31,312 INFO [M:0;db9ad1cb6cf9:33571 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:54:31,312 INFO [M:0;db9ad1cb6cf9:33571 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:54:31,313 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:54:31,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:54:31,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:31,314 DEBUG [M:0;db9ad1cb6cf9:33571 {}] zookeeper.ZKUtil(347): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:54:31,314 WARN [M:0;db9ad1cb6cf9:33571 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:54:31,315 INFO [M:0;db9ad1cb6cf9:33571 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/.lastflushedseqids 2024-11-07T12:54:31,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741849_1025 (size=130) 2024-11-07T12:54:31,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741849_1025 (size=130) 2024-11-07T12:54:31,333 INFO [M:0;db9ad1cb6cf9:33571 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:54:31,333 INFO [M:0;db9ad1cb6cf9:33571 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:54:31,333 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:54:31,334 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:54:31,334 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:54:31,334 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:54:31,334 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:54:31,334 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.61 KB heapSize=55.02 KB 2024-11-07T12:54:31,374 DEBUG [M:0;db9ad1cb6cf9:33571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b55540f3d854ddba79801b4f790bd04 is 82, key is hbase:meta,,1/info:regioninfo/1730984019505/Put/seqid=0 2024-11-07T12:54:31,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:54:31,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33673-0x1001a4e0e4b0001, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:54:31,410 INFO [RS:0;db9ad1cb6cf9:33673 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:54:31,410 INFO [RS:0;db9ad1cb6cf9:33673 {}] regionserver.HRegionServer(1031): Exiting; stopping=db9ad1cb6cf9,33673,1730984018915; zookeeper connection closed. 
2024-11-07T12:54:31,414 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7c7480da {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7c7480da 2024-11-07T12:54:31,415 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:54:31,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741850_1026 (size=5672) 2024-11-07T12:54:31,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741850_1026 (size=5672) 2024-11-07T12:54:31,426 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b55540f3d854ddba79801b4f790bd04 2024-11-07T12:54:31,458 DEBUG [M:0;db9ad1cb6cf9:33571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/01081602578d4648b7ba1e602be9ae1e is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730984020014/Put/seqid=0 2024-11-07T12:54:31,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741851_1027 (size=7825) 2024-11-07T12:54:31,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741851_1027 (size=7825) 2024-11-07T12:54:31,505 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.01 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/01081602578d4648b7ba1e602be9ae1e 2024-11-07T12:54:31,516 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 01081602578d4648b7ba1e602be9ae1e 2024-11-07T12:54:31,541 DEBUG [M:0;db9ad1cb6cf9:33571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/65b599ad18cf4b65b385a4dd3a4bd98d is 69, key is db9ad1cb6cf9,33673,1730984018915/rs:state/1730984019152/Put/seqid=0 2024-11-07T12:54:31,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741852_1028 (size=5156) 2024-11-07T12:54:31,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741852_1028 (size=5156) 2024-11-07T12:54:31,587 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/65b599ad18cf4b65b385a4dd3a4bd98d 2024-11-07T12:54:31,623 DEBUG [M:0;db9ad1cb6cf9:33571 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/60ea4659851546109cc8b0910638a82c is 52, key is load_balancer_on/state:d/1730984019641/Put/seqid=0 2024-11-07T12:54:31,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741853_1029 (size=5056) 2024-11-07T12:54:31,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741853_1029 (size=5056) 2024-11-07T12:54:31,704 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/60ea4659851546109cc8b0910638a82c 2024-11-07T12:54:31,713 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b55540f3d854ddba79801b4f790bd04 as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5b55540f3d854ddba79801b4f790bd04 2024-11-07T12:54:31,720 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5b55540f3d854ddba79801b4f790bd04, entries=8, sequenceid=121, filesize=5.5 K 2024-11-07T12:54:31,723 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/01081602578d4648b7ba1e602be9ae1e as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/01081602578d4648b7ba1e602be9ae1e 2024-11-07T12:54:31,735 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 01081602578d4648b7ba1e602be9ae1e 2024-11-07T12:54:31,735 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/01081602578d4648b7ba1e602be9ae1e, entries=14, sequenceid=121, filesize=7.6 K 2024-11-07T12:54:31,737 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/65b599ad18cf4b65b385a4dd3a4bd98d as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/65b599ad18cf4b65b385a4dd3a4bd98d 2024-11-07T12:54:31,759 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/65b599ad18cf4b65b385a4dd3a4bd98d, entries=1, 
sequenceid=121, filesize=5.0 K 2024-11-07T12:54:31,761 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/60ea4659851546109cc8b0910638a82c as hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/60ea4659851546109cc8b0910638a82c 2024-11-07T12:54:31,784 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39771/user/jenkins/test-data/65fe1571-137c-f10a-1e29-58b41451042e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/60ea4659851546109cc8b0910638a82c, entries=1, sequenceid=121, filesize=4.9 K 2024-11-07T12:54:31,789 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 455ms, sequenceid=121, compaction requested=false 2024-11-07T12:54:31,836 INFO [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:54:31,836 DEBUG [M:0;db9ad1cb6cf9:33571 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730984071333Disabling compacts and flushes for region at 1730984071333Disabling writes for close at 1730984071334 (+1 ms)Obtaining lock to block concurrent updates at 1730984071334Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730984071334Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44659, getHeapSize=56272, getOffHeapSize=0, getCellsCount=140 at 1730984071334Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1730984071335 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730984071335Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730984071373 (+38 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730984071373Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730984071433 (+60 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730984071458 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730984071458Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730984071516 (+58 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730984071540 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730984071540Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730984071594 (+54 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730984071620 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730984071620Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6add6ea0: reopening flushed file at 1730984071712 (+92 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50971c75: reopening flushed file at 1730984071720 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@270d6cbd: reopening flushed file at 1730984071735 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60141c8f: reopening flushed file at 1730984071760 (+25 ms)Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 455ms, sequenceid=121, compaction requested=false at 1730984071789 (+29 ms)Writing region close event to WAL at 1730984071835 (+46 ms)Closed at 1730984071835 2024-11-07T12:54:31,845 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,845 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,845 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,845 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,845 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:54:31,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34211 is added to blk_1073741830_1006 (size=53056) 2024-11-07T12:54:31,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34989 is added to blk_1073741830_1006 (size=53056) 2024-11-07T12:54:31,852 INFO [M:0;db9ad1cb6cf9:33571 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:54:31,852 INFO [M:0;db9ad1cb6cf9:33571 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33571 2024-11-07T12:54:31,852 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:54:31,852 INFO [M:0;db9ad1cb6cf9:33571 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:54:31,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:54:31,955 INFO [M:0;db9ad1cb6cf9:33571 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:54:31,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33571-0x1001a4e0e4b0000, quorum=127.0.0.1:58289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:54:31,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:31,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:31,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6826318a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:54:31,983 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6df2cf02{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:54:31,983 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:54:31,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ec454b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:54:31,983 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@677a249b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.log.dir/,STOPPED} 2024-11-07T12:54:31,990 WARN [BP-1161455003-172.17.0.2-1730984018240 heartbeating to localhost/127.0.0.1:39771 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:54:31,990 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:54:31,990 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:54:31,990 WARN [BP-1161455003-172.17.0.2-1730984018240 heartbeating to localhost/127.0.0.1:39771 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1161455003-172.17.0.2-1730984018240 (Datanode Uuid df98d698-3c30-459c-bc6d-ce38ba0b2515) service to localhost/127.0.0.1:39771 2024-11-07T12:54:31,990 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/data/data3/current/BP-1161455003-172.17.0.2-1730984018240 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:54:31,991 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/data/data4/current/BP-1161455003-172.17.0.2-1730984018240 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:54:31,991 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:54:32,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a4b134d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:54:32,020 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25c02940{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:54:32,020 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:54:32,021 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67f9152{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:54:32,021 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4114613b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.log.dir/,STOPPED} 2024-11-07T12:54:32,023 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:54:32,023 WARN [BP-1161455003-172.17.0.2-1730984018240 heartbeating to localhost/127.0.0.1:39771 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:54:32,023 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:54:32,023 WARN [BP-1161455003-172.17.0.2-1730984018240 heartbeating to localhost/127.0.0.1:39771 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1161455003-172.17.0.2-1730984018240 (Datanode Uuid a37fa075-cdb6-4a82-8c7d-60b38cb8c89b) service to localhost/127.0.0.1:39771 2024-11-07T12:54:32,024 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/data/data1/current/BP-1161455003-172.17.0.2-1730984018240 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:54:32,024 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/cluster_82683131-5dec-7358-5963-b63f585a71e0/data/data2/current/BP-1161455003-172.17.0.2-1730984018240 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:54:32,024 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:54:32,035 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4135d6d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:54:32,036 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15cd018{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:54:32,036 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:54:32,036 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@294b1089{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:54:32,036 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3eec5be0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.log.dir/,STOPPED} 2024-11-07T12:54:32,049 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:54:32,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:54:32,090 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=204 (was 179) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39771 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39771 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39771 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39771 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39771 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:39771 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39771 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39771 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=486 (was 455) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=70 (was 51) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8282 (was 7816) - AvailableMemoryMB LEAK? - 2024-11-07T12:54:32,113 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=204, OpenFileDescriptor=486, MaxFileDescriptor=1048576, SystemLoadAverage=70, ProcessCount=11, AvailableMemoryMB=8281 2024-11-07T12:54:32,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:54:32,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.log.dir so I do NOT create it in target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda 2024-11-07T12:54:32,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1a0bfac4-1799-ac3d-0826-44842dcfcbad/hadoop.tmp.dir so I do NOT create it in target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda 2024-11-07T12:54:32,113 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef, deleteOnExit=true 2024-11-07T12:54:32,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:54:32,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/test.cache.data in system properties and HBase conf 2024-11-07T12:54:32,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:54:32,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:54:32,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:54:32,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:54:32,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:54:32,114 DEBUG [Time-limited test {}] fs.HFileSystem(310): 
The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-07T12:54:32,115 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:54:32,115 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:54:32,116 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:54:32,116 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:54:32,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:54:32,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:54:32,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:54:32,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:54:32,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:54:32,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:54:32,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:54:32,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:54:32,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:54:32,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:54:32,146 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:54:32,259 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:54:32,264 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:54:32,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:54:32,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:54:32,273 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:54:32,275 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:54:32,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8e4b628{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:54:32,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38dc0fd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:54:32,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60125866{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/java.io.tmpdir/jetty-localhost-37603-hadoop-hdfs-3_4_1-tests_jar-_-any-7476132928953314068/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:54:32,441 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@273a6f23{HTTP/1.1, (http/1.1)}{localhost:37603} 2024-11-07T12:54:32,441 INFO [Time-limited test {}] server.Server(415): Started @237710ms 2024-11-07T12:54:32,460 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:54:32,627 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-07T12:54:32,631 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-07T12:54:32,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-07T12:54:32,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-07T12:54:32,632 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-07T12:54:32,633 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1091e18a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.log.dir/,AVAILABLE}
2024-11-07T12:54:32,633 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5411f427{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-07T12:54:32,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3bc081d8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/java.io.tmpdir/jetty-localhost-39643-hadoop-hdfs-3_4_1-tests_jar-_-any-1292769015830450118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:54:32,798 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3aca12f3{HTTP/1.1, (http/1.1)}{localhost:39643}
2024-11-07T12:54:32,798 INFO [Time-limited test {}] server.Server(415): Started @238067ms
2024-11-07T12:54:32,799 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-07T12:54:32,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
    java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
    Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:54:32,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
    java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
    Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:54:32,973 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:54:32,979 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:54:32,980 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:54:32,980 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:54:32,980 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:54:33,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27cee48d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:54:33,010 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d7cc900{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:54:33,086 WARN [Thread-1952 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/data/data2/current/BP-1024326402-172.17.0.2-1730984072159/current, will proceed with Du for space computation calculation, 2024-11-07T12:54:33,090 WARN [Thread-1951 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/data/data1/current/BP-1024326402-172.17.0.2-1730984072159/current, will proceed with Du for space computation calculation, 2024-11-07T12:54:33,163 WARN [Thread-1930 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:54:33,172 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x89f9c6fc9b2b7db2 with lease ID 0x65676e89513af3d8: Processing first storage report for DS-abe50188-b3ca-4ee4-9307-fe234cd35c7b from datanode DatanodeRegistration(127.0.0.1:38801, datanodeUuid=df3b3ded-3155-41ba-aa35-e05dd4eb1945, infoPort=45039, infoSecurePort=0, ipcPort=38167, storageInfo=lv=-57;cid=testClusterID;nsid=1479052776;c=1730984072159) 2024-11-07T12:54:33,172 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x89f9c6fc9b2b7db2 with lease ID 0x65676e89513af3d8: from storage DS-abe50188-b3ca-4ee4-9307-fe234cd35c7b node DatanodeRegistration(127.0.0.1:38801, datanodeUuid=df3b3ded-3155-41ba-aa35-e05dd4eb1945, infoPort=45039, infoSecurePort=0, ipcPort=38167, storageInfo=lv=-57;cid=testClusterID;nsid=1479052776;c=1730984072159), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:54:33,172 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x89f9c6fc9b2b7db2 with lease ID 0x65676e89513af3d8: Processing first storage report for DS-b63e6820-14a6-4f22-bb2e-c09f551fc0d6 from datanode DatanodeRegistration(127.0.0.1:38801, datanodeUuid=df3b3ded-3155-41ba-aa35-e05dd4eb1945, infoPort=45039, infoSecurePort=0, ipcPort=38167, storageInfo=lv=-57;cid=testClusterID;nsid=1479052776;c=1730984072159) 2024-11-07T12:54:33,172 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x89f9c6fc9b2b7db2 with lease ID 0x65676e89513af3d8: from storage DS-b63e6820-14a6-4f22-bb2e-c09f551fc0d6 node DatanodeRegistration(127.0.0.1:38801, datanodeUuid=df3b3ded-3155-41ba-aa35-e05dd4eb1945, infoPort=45039, infoSecurePort=0, ipcPort=38167, storageInfo=lv=-57;cid=testClusterID;nsid=1479052776;c=1730984072159), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:54:33,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78512cf7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/java.io.tmpdir/jetty-localhost-45059-hadoop-hdfs-3_4_1-tests_jar-_-any-15477225610269734323/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:54:33,200 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b6783f5{HTTP/1.1, (http/1.1)}{localhost:45059} 2024-11-07T12:54:33,200 INFO [Time-limited test {}] server.Server(415): Started @238469ms 2024-11-07T12:54:33,201 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-07T12:54:33,372 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/data/data3/current/BP-1024326402-172.17.0.2-1730984072159/current, will proceed with Du for space computation calculation, 2024-11-07T12:54:33,388 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/data/data4/current/BP-1024326402-172.17.0.2-1730984072159/current, will proceed with Du for space computation calculation, 2024-11-07T12:54:33,470 WARN [Thread-1966 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:54:33,483 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1c3825d68557862 with lease ID 0x65676e89513af3d9: Processing first storage report for DS-6cfbf6d2-b38a-465c-b70c-5cfffd43cb79 from datanode DatanodeRegistration(127.0.0.1:44169, datanodeUuid=c14b1b0e-981e-4df1-a4d5-4a7378a03ce8, infoPort=40207, infoSecurePort=0, ipcPort=36815, storageInfo=lv=-57;cid=testClusterID;nsid=1479052776;c=1730984072159) 2024-11-07T12:54:33,484 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1c3825d68557862 with lease ID 0x65676e89513af3d9: from storage DS-6cfbf6d2-b38a-465c-b70c-5cfffd43cb79 node DatanodeRegistration(127.0.0.1:44169, datanodeUuid=c14b1b0e-981e-4df1-a4d5-4a7378a03ce8, infoPort=40207, infoSecurePort=0, ipcPort=36815, storageInfo=lv=-57;cid=testClusterID;nsid=1479052776;c=1730984072159), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:54:33,484 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1c3825d68557862 with lease ID 0x65676e89513af3d9: Processing first storage report for DS-3b6a6fbb-8120-4479-92dc-222b25122c6f from datanode DatanodeRegistration(127.0.0.1:44169, datanodeUuid=c14b1b0e-981e-4df1-a4d5-4a7378a03ce8, infoPort=40207, infoSecurePort=0, ipcPort=36815, storageInfo=lv=-57;cid=testClusterID;nsid=1479052776;c=1730984072159) 2024-11-07T12:54:33,484 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1c3825d68557862 with lease ID 0x65676e89513af3d9: from storage DS-3b6a6fbb-8120-4479-92dc-222b25122c6f node DatanodeRegistration(127.0.0.1:44169, datanodeUuid=c14b1b0e-981e-4df1-a4d5-4a7378a03ce8, infoPort=40207, infoSecurePort=0, ipcPort=36815, storageInfo=lv=-57;cid=testClusterID;nsid=1479052776;c=1730984072159), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:54:33,525 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda 2024-11-07T12:54:33,604 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/zookeeper_0, clientPort=59865, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:54:33,616 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59865 2024-11-07T12:54:33,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:54:33,618 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:54:33,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:54:33,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:54:33,683 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc with version=8 2024-11-07T12:54:33,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/hbase-staging 2024-11-07T12:54:33,686 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:54:33,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:54:33,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:54:33,686 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:54:33,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:54:33,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:54:33,686 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:54:33,686 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:54:33,697 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41575 2024-11-07T12:54:33,698 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41575 connecting to ZooKeeper ensemble=127.0.0.1:59865 2024-11-07T12:54:33,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415750x0, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:54:33,724 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41575-0x1001a4ee4000000 connected 2024-11-07T12:54:33,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:54:33,799 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:54:33,803 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:54:33,803 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc, hbase.cluster.distributed=false 2024-11-07T12:54:33,806 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:54:33,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41575 2024-11-07T12:54:33,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41575 2024-11-07T12:54:33,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41575 2024-11-07T12:54:33,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41575 2024-11-07T12:54:33,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41575 2024-11-07T12:54:33,836 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:54:33,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:54:33,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:54:33,836 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:54:33,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:54:33,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:54:33,836 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:54:33,836 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:54:33,841 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45177 2024-11-07T12:54:33,842 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45177 connecting to ZooKeeper ensemble=127.0.0.1:59865 2024-11-07T12:54:33,843 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:54:33,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:54:33,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:451770x0, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:54:33,851 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:451770x0, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:54:33,851 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45177-0x1001a4ee4000001 connected 2024-11-07T12:54:33,851 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:54:33,856 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:54:33,857 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:54:33,858 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:54:33,860 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45177 2024-11-07T12:54:33,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45177 2024-11-07T12:54:33,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45177 2024-11-07T12:54:33,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45177 2024-11-07T12:54:33,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45177 2024-11-07T12:54:33,884 
DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db9ad1cb6cf9:41575 2024-11-07T12:54:33,885 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db9ad1cb6cf9,41575,1730984073685 2024-11-07T12:54:33,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:54:33,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:54:33,889 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db9ad1cb6cf9,41575,1730984073685 2024-11-07T12:54:33,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:54:33,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:33,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:33,906 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:54:33,907 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db9ad1cb6cf9,41575,1730984073685 from backup master directory 2024-11-07T12:54:33,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:54:33,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db9ad1cb6cf9,41575,1730984073685 2024-11-07T12:54:33,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:54:33,909 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T12:54:33,910 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db9ad1cb6cf9,41575,1730984073685
2024-11-07T12:54:33,927 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/hbase.id] with ID: ae9d5338-1cef-41f3-a7fd-dc91a0563500
2024-11-07T12:54:33,927 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/.tmp/hbase.id
2024-11-07T12:54:33,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741826_1002 (size=42)
2024-11-07T12:54:33,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741826_1002 (size=42)
2024-11-07T12:54:33,944 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/.tmp/hbase.id]:[hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/hbase.id]
2024-11-07T12:54:33,959 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:54:33,959 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-07T12:54:33,960 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
2024-11-07T12:54:33,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:54:33,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:54:33,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
    java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
    Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:54:33,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
    java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
    Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:54:33,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741827_1003 (size=196)
2024-11-07T12:54:33,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741827_1003 (size=196)
2024-11-07T12:54:33,975 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-07T12:54:33,976 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-07T12:54:33,977 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-07T12:54:34,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741828_1004 (size=1189)
2024-11-07T12:54:34,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741828_1004 (size=1189)
2024-11-07T12:54:34,020 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE',
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store 2024-11-07T12:54:34,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:54:34,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:54:34,050 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:54:34,050 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:54:34,050 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:54:34,050 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:54:34,050 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:54:34,050 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:54:34,050 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-07T12:54:34,050 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730984074050Disabling compacts and flushes for region at 1730984074050Disabling writes for close at 1730984074050Writing region close event to WAL at 1730984074050Closed at 1730984074050 2024-11-07T12:54:34,052 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/.initializing 2024-11-07T12:54:34,052 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/WALs/db9ad1cb6cf9,41575,1730984073685 2024-11-07T12:54:34,056 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C41575%2C1730984073685, suffix=, logDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/WALs/db9ad1cb6cf9,41575,1730984073685, archiveDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/oldWALs, maxLogs=10 2024-11-07T12:54:34,057 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C41575%2C1730984073685.1730984074056 2024-11-07T12:54:34,084 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/WALs/db9ad1cb6cf9,41575,1730984073685/db9ad1cb6cf9%2C41575%2C1730984073685.1730984074056 2024-11-07T12:54:34,113 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45039:45039),(127.0.0.1/127.0.0.1:40207:40207)] 2024-11-07T12:54:34,143 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:54:34,144 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:54:34,144 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,144 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:54:34,155 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:34,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:54:34,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:54:34,157 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:34,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:54:34,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,160 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:54:34,160 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:34,160 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:54:34,160 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,161 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:54:34,161 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:34,162 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:54:34,162 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,163 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,164 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,165 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,165 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,166 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:54:34,167 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:54:34,187 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:54:34,188 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866604, jitterRate=0.10194520652294159}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:54:34,188 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730984074144Initializing all the Stores at 1730984074146 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984074147 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984074152 (+5 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984074152Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984074152Cleaning up temporary data from old regions at 1730984074165 (+13 ms)Region opened successfully at 1730984074188 (+23 ms) 2024-11-07T12:54:34,197 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:54:34,201 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@124f5ef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:54:34,206 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:54:34,206 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:54:34,206 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:54:34,206 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:54:34,208 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-07T12:54:34,208 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:54:34,208 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:54:34,219 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:54:34,220 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:54:34,229 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:54:34,229 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:54:34,231 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:54:34,232 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:54:34,232 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:54:34,235 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:54:34,237 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:54:34,239 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:54:34,240 
DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:54:34,242 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:54:34,243 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:54:34,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:54:34,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:34,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:54:34,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:34,248 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db9ad1cb6cf9,41575,1730984073685, sessionid=0x1001a4ee4000000, setting cluster-up flag (Was=false) 2024-11-07T12:54:34,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:34,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:34,265 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:54:34,266 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,41575,1730984073685 2024-11-07T12:54:34,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:34,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:34,285 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:54:34,287 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,41575,1730984073685 2024-11-07T12:54:34,292 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:54:34,314 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:54:34,315 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:54:34,315 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:54:34,315 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db9ad1cb6cf9,41575,1730984073685 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:54:34,317 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:54:34,318 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:54:34,318 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:54:34,318 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:54:34,318 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db9ad1cb6cf9:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:54:34,318 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,318 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:54:34,318 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:54:34,331 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:54:34,331 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:54:34,336 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:34,336 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:54:34,336 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730984104336 2024-11-07T12:54:34,337 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:54:34,337 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:54:34,337 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:54:34,337 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:54:34,337 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:54:34,337 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:54:34,344 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,348 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:54:34,348 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:54:34,348 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:54:34,358 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:54:34,358 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:54:34,362 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730984074358,5,FailOnTimeoutGroup] 2024-11-07T12:54:34,368 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730984074363,5,FailOnTimeoutGroup] 2024-11-07T12:54:34,368 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,368 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:54:34,369 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,369 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
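
Each of the cleaner and chore registrations above is reported through the same ChoreService message, so the schedule can be tabulated straight from the log text. A minimal sketch follows; it assumes the log has been saved locally as master.log (a hypothetical file name) and relies only on the message shape shown above.

    import re
    from datetime import timedelta

    # Matches the ChoreService enablement messages above, e.g.
    #   Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
    CHORE_RE = re.compile(
        r"ScheduledChore name=(?P<name>.+?), period=(?P<period>\d+), "
        r"unit=(?P<unit>\w+) is enabled\."
    )

    UNIT_MS = {"MILLISECONDS": 1, "SECONDS": 1000}  # the only units that appear in this log

    def chore_schedule(log_text):
        """Return {chore name: period as a timedelta} for every enabled chore."""
        chores = {}
        for m in CHORE_RE.finditer(log_text):
            millis = int(m.group("period")) * UNIT_MS[m.group("unit")]
            chores[m.group("name")] = timedelta(milliseconds=millis)
        return chores

    if __name__ == "__main__":
        with open("master.log") as fh:  # hypothetical local copy of this log
            for name, period in sorted(chore_schedule(fh.read()).items()):
                print(f"{name:60} every {period}")
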
2024-11-07T12:54:34,381 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(746): ClusterId : ae9d5338-1cef-41f3-a7fd-dc91a0563500 2024-11-07T12:54:34,382 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:54:34,388 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:54:34,389 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:54:34,391 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:54:34,392 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11304fed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:54:34,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:54:34,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:54:34,416 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db9ad1cb6cf9:45177 2024-11-07T12:54:34,416 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:54:34,416 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:54:34,416 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(832): About to register with Master. 
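
The Block report processor entries above (and the earlier addStoredBlock lines for blk_1073741831_1007) each name one DataNode per report, so replica counts per block can be cross-checked from the log alone. A small sketch; expected_replicas=2 is an assumption taken from the two-node write pipelines shown elsewhere in this log, not a value read from any single entry.

    import re
    from collections import defaultdict

    # Matches the DataNode block-report entries above, e.g.
    #   BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741831_1007 (size=1321)
    BLOCK_RE = re.compile(
        r"addStoredBlock: (?P<datanode>[\d.]+:\d+) is added to "
        r"(?P<block>blk_\S+) \(size=\d+\)"
    )

    def under_replicated(log_text, expected_replicas=2):
        """Group reporting DataNodes per block and return the blocks that were
        reported by fewer than expected_replicas distinct nodes."""
        replicas = defaultdict(set)
        for m in BLOCK_RE.finditer(log_text):
            replicas[m.group("block")].add(m.group("datanode"))
        return {blk: nodes for blk, nodes in replicas.items()
                if len(nodes) < expected_replicas}
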
2024-11-07T12:54:34,417 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(2659): reportForDuty to master=db9ad1cb6cf9,41575,1730984073685 with port=45177, startcode=1730984073836 2024-11-07T12:54:34,417 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:54:34,421 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34181, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:54:34,421 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41575 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:34,421 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41575 {}] master.ServerManager(517): Registering regionserver=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:34,423 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc 2024-11-07T12:54:34,423 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33769 2024-11-07T12:54:34,423 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:54:34,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:54:34,426 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] zookeeper.ZKUtil(111): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:34,426 WARN [RS:0;db9ad1cb6cf9:45177 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:54:34,426 INFO [RS:0;db9ad1cb6cf9:45177 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:54:34,426 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:34,429 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9ad1cb6cf9,45177,1730984073836] 2024-11-07T12:54:34,436 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:54:34,438 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:54:34,438 INFO [RS:0;db9ad1cb6cf9:45177 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:54:34,438 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
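
Server identities in these entries ('db9ad1cb6cf9,45177,1730984073836', 'db9ad1cb6cf9,41575,1730984073685') follow the host,port,startcode layout, and the startcode is an epoch-millisecond start time that lines up with the surrounding timestamps. A small helper to split one apart; the function name is only for illustration.

    from datetime import datetime, timezone

    def parse_server_name(server_name):
        """Split a server name of the form 'host,port,startcode'.

        The startcode is the process start time in epoch milliseconds, e.g.
        1730984073836 -> 2024-11-07T12:54:33.836Z, matching the log above.
        """
        host, port, startcode = server_name.rsplit(",", 2)
        started = datetime.fromtimestamp(int(startcode) / 1000, tz=timezone.utc)
        return {"host": host, "port": int(port), "started": started}

    print(parse_server_name("db9ad1cb6cf9,45177,1730984073836"))
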
2024-11-07T12:54:34,441 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:54:34,442 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:54:34,442 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,442 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,442 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,442 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,442 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,442 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,442 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:54:34,442 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,442 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,442 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,443 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,443 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,443 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:54:34,443 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:54:34,443 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:54:34,448 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
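
The executor-service entries above carry explicit corePoolSize and maxPoolSize values, so the configured thread ceiling can be totalled per role from the log text. A rough sketch; it classifies executors by the '-master/' or '-regionserver/' fragment embedded in the name, which is simply how the names happen to look in this log.

    import re
    from collections import Counter

    # Matches the ExecutorService start-up entries above, e.g.
    #   Starting executor service name=RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1
    EXEC_RE = re.compile(
        r"Starting executor service name=(?P<name>\S+), "
        r"corePoolSize=\d+, maxPoolSize=(?P<max>\d+)"
    )

    def thread_ceiling(log_text):
        """Sum maxPoolSize per process role, keyed by the role fragment in the name."""
        totals = Counter()
        for m in EXEC_RE.finditer(log_text):
            role = "master" if "-master/" in m.group("name") else "regionserver"
            totals[role] += int(m.group("max"))
        return dict(totals)
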
2024-11-07T12:54:34,449 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,449 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,449 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,449 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,449 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,45177,1730984073836-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:54:34,469 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:54:34,469 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,45177,1730984073836-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,470 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,470 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.Replication(171): db9ad1cb6cf9,45177,1730984073836 started 2024-11-07T12:54:34,490 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:34,490 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(1482): Serving as db9ad1cb6cf9,45177,1730984073836, RpcServer on db9ad1cb6cf9/172.17.0.2:45177, sessionid=0x1001a4ee4000001 2024-11-07T12:54:34,490 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:54:34,490 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:34,490 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,45177,1730984073836' 2024-11-07T12:54:34,490 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:54:34,491 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:54:34,492 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:54:34,492 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:54:34,492 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:34,492 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,45177,1730984073836' 2024-11-07T12:54:34,492 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:54:34,493 DEBUG 
[RS:0;db9ad1cb6cf9:45177 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:54:34,493 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:54:34,493 INFO [RS:0;db9ad1cb6cf9:45177 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:54:34,493 INFO [RS:0;db9ad1cb6cf9:45177 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:54:34,596 INFO [RS:0;db9ad1cb6cf9:45177 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C45177%2C1730984073836, suffix=, logDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836, archiveDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/oldWALs, maxLogs=32 2024-11-07T12:54:34,597 INFO [RS:0;db9ad1cb6cf9:45177 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C45177%2C1730984073836.1730984074597 2024-11-07T12:54:34,610 INFO [RS:0;db9ad1cb6cf9:45177 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.1730984074597 2024-11-07T12:54:34,637 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45039:45039),(127.0.0.1/127.0.0.1:40207:40207)] 2024-11-07T12:54:34,803 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:54:34,803 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc
2024-11-07T12:54:34,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741833_1009 (size=32)
2024-11-07T12:54:34,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741833_1009 (size=32)
2024-11-07T12:54:34,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:54:34,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:54:35,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:54:35,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:54:35,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:54:35,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:54:35,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:54:35,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:54:35,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-07T12:54:35,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:54:35,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:54:35,241 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:54:35,241 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:35,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:54:35,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:54:35,244 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:54:35,244 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:35,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:54:35,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:54:35,250 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:54:35,250 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:35,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:54:35,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:54:35,252 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:54:35,252 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:35,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:54:35,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:54:35,254 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740 2024-11-07T12:54:35,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740 
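
The CompactionConfiguration entries repeated above for every column family quote their thresholds in raw units; converting a few of them makes the settings easier to read. The input numbers are copied from the log lines, the arithmetic is the only addition, and reading "8.00 EB" as Long.MAX_VALUE is an inference from the max_age value printed alongside it.

    # Thresholds as printed in the CompactionConfiguration lines above.
    min_compact_size = 128 * 2**20      # "minCompactSize:128 MB" -> 134217728 bytes
    throttle_point = 2684354560         # "throttle point 2684354560" bytes
    major_period_ms = 604800000         # "major period 604800000" milliseconds
    max_compact_size = 2**63 - 1        # printed as "maxCompactSize:8.00 EB"

    print(throttle_point / 2**30)                    # 2.5 -> the compaction throttle threshold is 2.5 GiB
    print(major_period_ms / (1000 * 60 * 60 * 24))   # 7.0 -> the major compaction period is 7 days
    print(max_compact_size)                          # 9223372036854775807 -> effectively unbounded
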
2024-11-07T12:54:35,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:54:35,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:54:35,257 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:54:35,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:54:35,275 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:54:35,275 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733106, jitterRate=-0.06780780851840973}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:54:35,276 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730984075218Initializing all the Stores at 1730984075219 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984075219Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984075237 (+18 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984075237Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984075237Cleaning up temporary data from old regions at 1730984075256 (+19 ms)Region opened successfully at 1730984075276 (+20 ms) 2024-11-07T12:54:35,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:54:35,277 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:54:35,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:54:35,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
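
The "Region open journal" and "Region close journal" entries above pack every step into one string, each step ending with " at <epoch-millis>" and, after the first, a "(+N ms)" delta. A minimal sketch that splits such a string back into timed steps; the sample journal below is an abbreviated excerpt of the hbase:meta open journal above.

    import re
    from datetime import datetime, timezone

    # Each journal step ends with " at <13-digit epoch millis>" and optionally "(+N ms)".
    STEP_RE = re.compile(r" at (\d{13})(?: \(\+\d+ ms\))?")

    def journal_steps(journal):
        """Yield (step description, UTC time, millis since the previous step)."""
        prev, pos = None, 0
        for m in STEP_RE.finditer(journal):
            step = journal[pos:m.start()].strip()
            ts = int(m.group(1))
            delta = ts - prev if prev is not None else 0
            yield step, datetime.fromtimestamp(ts / 1000, tz=timezone.utc), delta
            prev, pos = ts, m.end()

    journal = ("Writing region info on filesystem at 1730984075218"
               "Initializing all the Stores at 1730984075219 (+1 ms)"
               "Region opened successfully at 1730984075276")
    for step, when, delta in journal_steps(journal):
        print(f"+{delta:>3} ms  {when:%H:%M:%S.%f}  {step}")
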
2024-11-07T12:54:35,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:54:35,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,295 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:54:35,295 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730984075277Disabling compacts and flushes for region at 1730984075277Disabling writes for close at 1730984075277Writing region close event to WAL at 1730984075295 (+18 ms)Closed at 1730984075295 2024-11-07T12:54:35,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,297 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:54:35,297 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:54:35,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:54:35,301 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:54:35,303 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:54:35,453 DEBUG [db9ad1cb6cf9:41575 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:54:35,454 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:35,456 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,45177,1730984073836, state=OPENING 2024-11-07T12:54:35,458 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:54:35,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:35,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:54:35,462 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:54:35,462 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:54:35,465 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:54:35,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,45177,1730984073836}] 2024-11-07T12:54:35,619 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:54:35,621 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56715, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:54:35,626 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:54:35,626 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:54:35,630 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C45177%2C1730984073836.meta, suffix=.meta, logDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836, archiveDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/oldWALs, maxLogs=32 2024-11-07T12:54:35,630 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C45177%2C1730984073836.meta.1730984075630.meta 2024-11-07T12:54:35,637 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.meta.1730984075630.meta 2024-11-07T12:54:35,648 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40207:40207),(127.0.0.1/127.0.0.1:45039:45039)] 2024-11-07T12:54:35,652 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:54:35,653 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:54:35,653 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:54:35,653 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
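[editor note] The repeated HBase-Metrics2-1 WARNs earlier in this section come from a metrics collection pass hitting a NullPointerException ("this.executors" is null) inside FsDatasetImpl, which appears to be the DataNode-side dataset being torn down while metrics are still polled. The following is not the Hadoop code, only a compact illustration of the defensive pattern that avoids that failure mode: snapshot the possibly-null map before iterating and report nothing once the component is shut down.

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ThreadPoolExecutor;

// Illustrative stand-in for a metrics source whose executor map can be
// nulled concurrently during shutdown, as the WARNs above suggest.
class ExecutorMetricsSketch {
  private volatile Map<String, ThreadPoolExecutor> executors;

  void start() {
    executors = Collections.emptyMap();
  }

  long totalQueuedTasks() {
    // Local snapshot: a concurrent shutdown cannot null the field between
    // the check and the use, so collection degrades to 0 instead of throwing.
    Map<String, ThreadPoolExecutor> snapshot = executors;
    if (snapshot == null) {
      return 0L;
    }
    long queued = 0L;
    for (ThreadPoolExecutor e : snapshot.values()) {
      queued += e.getQueue().size();
    }
    return queued;
  }

  void shutdown() {
    Map<String, ThreadPoolExecutor> snapshot = executors;
    executors = null;
    if (snapshot != null) {
      snapshot.values().forEach(ThreadPoolExecutor::shutdown);
    }
  }
}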
2024-11-07T12:54:35,653 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:54:35,653 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:54:35,653 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:54:35,653 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:54:35,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:54:35,658 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:54:35,658 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:35,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:54:35,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:54:35,660 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:54:35,660 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:35,660 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:54:35,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:54:35,661 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:54:35,661 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:35,662 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:54:35,662 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:54:35,662 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:54:35,662 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:35,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
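[editor note] The CompactionConfiguration lines above echo the effective compaction tuning for each meta store: minCompactSize 128 MB, 3–10 files per compaction, ratio 1.2, off-peak ratio 5.0. A hedged sketch of how those numbers are normally driven from configuration; the keys below are the usual HBase compaction properties, but they should be verified against the release in use rather than taken as authoritative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Files below this size are always eligible for minor compaction (128 MB in the log).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    // Lower/upper bounds on how many store files one compaction may select (3..10 in the log).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Selection ratios mirrored in the log line: 1.2 normally, 5.0 off-peak.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}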
2024-11-07T12:54:35,663 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:54:35,664 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740 2024-11-07T12:54:35,668 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740 2024-11-07T12:54:35,669 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:54:35,669 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:54:35,670 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:54:35,671 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:54:35,672 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697744, jitterRate=-0.11277322471141815}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:54:35,672 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:54:35,673 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730984075653Writing region info on filesystem at 1730984075654 (+1 ms)Initializing all the Stores at 1730984075654Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984075654Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984075656 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984075656Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984075656Cleaning up temporary data from old regions at 1730984075669 (+13 ms)Running coprocessor post-open hooks at 1730984075672 (+3 ms)Region opened successfully at 1730984075673 (+1 ms) 2024-11-07T12:54:35,675 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730984075619 2024-11-07T12:54:35,677 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:54:35,677 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:54:35,678 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:35,679 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,45177,1730984073836, state=OPEN 2024-11-07T12:54:35,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:54:35,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:54:35,685 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:54:35,685 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:54:35,685 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:35,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:54:35,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,45177,1730984073836 in 221 msec 2024-11-07T12:54:35,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:54:35,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 393 msec 2024-11-07T12:54:35,693 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:54:35,693 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:54:35,695 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:54:35,695 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=-1] 2024-11-07T12:54:35,695 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:54:35,696 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47455, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:54:35,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3920 sec 2024-11-07T12:54:35,703 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730984075703, completionTime=-1 2024-11-07T12:54:35,703 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:54:35,703 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-07T12:54:35,705 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-07T12:54:35,705 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730984135705 2024-11-07T12:54:35,705 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730984195705 2024-11-07T12:54:35,705 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-07T12:54:35,706 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41575,1730984073685-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:35,706 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41575,1730984073685-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:35,706 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41575,1730984073685-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:35,706 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db9ad1cb6cf9:41575, period=300000, unit=MILLISECONDS is enabled. 
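[editor note] InitMetaProcedure above reports that it is about to create the 'default' and 'hbase' namespaces as part of master bootstrap. Outside of bootstrap, the equivalent client-side call goes through the standard Admin API; a short sketch with a hypothetical application namespace (the two built-in namespaces already exist on a bootstrapped cluster, as the procedure shows):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Hypothetical namespace name; 'default' and 'hbase' are created by the master itself.
      admin.createNamespace(NamespaceDescriptor.create("logrolling_test").build());
    }
  }
}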
2024-11-07T12:54:35,706 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:35,708 DEBUG [master/db9ad1cb6cf9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:54:35,708 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:54:35,712 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.802sec 2024-11-07T12:54:35,712 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:54:35,712 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:54:35,712 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:54:35,712 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:54:35,712 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:54:35,712 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41575,1730984073685-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:54:35,712 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41575,1730984073685-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:54:35,714 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:54:35,714 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:54:35,714 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,41575,1730984073685-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T12:54:35,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f61588, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:54:35,783 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db9ad1cb6cf9,41575,-1 for getting cluster id 2024-11-07T12:54:35,784 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:54:35,793 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ae9d5338-1cef-41f3-a7fd-dc91a0563500' 2024-11-07T12:54:35,793 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:54:35,794 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ae9d5338-1cef-41f3-a7fd-dc91a0563500" 2024-11-07T12:54:35,794 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3057db0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:54:35,794 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db9ad1cb6cf9,41575,-1] 2024-11-07T12:54:35,794 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:54:35,795 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:54:35,796 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33944, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:54:35,797 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@444decb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:54:35,797 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:54:35,798 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=-1] 2024-11-07T12:54:35,799 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:54:35,800 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:54:35,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db9ad1cb6cf9,41575,1730984073685 2024-11-07T12:54:35,802 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:54:35,805 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:54:35,805 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T12:54:35,806 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is db9ad1cb6cf9,41575,1730984073685 2024-11-07T12:54:35,806 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@622747d6 2024-11-07T12:54:35,806 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T12:54:35,807 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33958, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T12:54:35,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41575 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-07T12:54:35,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41575 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-07T12:54:35,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41575 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:54:35,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41575 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-07T12:54:35,810 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:54:35,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:54:35,812 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:35,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41575 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-07T12:54:35,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41575 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:54:35,815 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:54:35,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:35,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741835_1011 (size=381) 2024-11-07T12:54:35,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741835_1011 (size=381) 2024-11-07T12:54:35,894 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d29ad1038af06888c50aff34ddab2741, NAME => 'TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc 2024-11-07T12:54:35,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741836_1012 (size=64) 2024-11-07T12:54:35,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741836_1012 (size=64) 2024-11-07T12:54:35,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:35,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:36,349 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:54:36,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing d29ad1038af06888c50aff34ddab2741, disabling compactions & flushes 2024-11-07T12:54:36,350 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:54:36,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:54:36,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. after waiting 0 ms 2024-11-07T12:54:36,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:54:36,350 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:54:36,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for d29ad1038af06888c50aff34ddab2741: Waiting for close lock at 1730984076349Disabling compacts and flushes for region at 1730984076349Disabling writes for close at 1730984076350 (+1 ms)Writing region close event to WAL at 1730984076350Closed at 1730984076350 2024-11-07T12:54:36,352 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:54:36,352 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1730984076352"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730984076352"}]},"ts":"1730984076352"} 2024-11-07T12:54:36,356 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
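[editor note] The CreateTableProcedure entries above carry out the client request logged earlier in this section: create 'TestLogRolling-testLogRolling' with a single 'info' family, where TableDescriptorChecker warned that MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) are deliberately tiny for the log-rolling test. A sketch of how such a request is issued through the Admin API; the sizes are copied from the log and would normally be far larger in production:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
            .build())
        // Intentionally small values, as flagged by the TableDescriptorChecker WARNs.
        .setMaxFileSize(786432L)
        .setMemStoreFlushSize(8192L)
        .build();

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The master turns this call into the CreateTableProcedure seen in this log.
      admin.createTable(td);
    }
  }
}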
2024-11-07T12:54:36,357 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:54:36,357 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730984076357"}]},"ts":"1730984076357"} 2024-11-07T12:54:36,362 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-07T12:54:36,362 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d29ad1038af06888c50aff34ddab2741, ASSIGN}] 2024-11-07T12:54:36,366 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d29ad1038af06888c50aff34ddab2741, ASSIGN 2024-11-07T12:54:36,367 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d29ad1038af06888c50aff34ddab2741, ASSIGN; state=OFFLINE, location=db9ad1cb6cf9,45177,1730984073836; forceNewPlan=false, retain=false 2024-11-07T12:54:36,518 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d29ad1038af06888c50aff34ddab2741, regionState=OPENING, regionLocation=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:36,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d29ad1038af06888c50aff34ddab2741, ASSIGN because future has completed 2024-11-07T12:54:36,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836}] 2024-11-07T12:54:36,682 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 
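[editor note] The Close-WAL-Writer-0 warnings that recur through the rest of this section show RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed reflectively against WALs on hdfs://localhost:37737, apparently a filesystem from an earlier stage of the test that has since been closed; the reflective call therefore fails with an InvocationTargetException whose real cause is "java.io.IOException: Filesystem closed". The sketch below is not the HBase utility, only a self-contained illustration of why the interesting error sits in the cause rather than in the wrapper exception:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveIsFileClosedSketch {
  // Stand-in for a DFS client whose method exists but whose client is already closed.
  public static class FakeFs {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    FakeFs fs = new FakeFs();
    Method m = FakeFs.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(fs, "/tmp/example-wal"); // hypothetical path
    } catch (InvocationTargetException e) {
      // Mirrors the WARNs in this log: unwrap getCause() to see the underlying IOException.
      System.out.println("reflective call failed, cause: " + e.getCause());
    }
  }
}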
2024-11-07T12:54:36,682 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d29ad1038af06888c50aff34ddab2741, NAME => 'TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:54:36,683 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,683 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:54:36,683 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,683 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,690 INFO [StoreOpener-d29ad1038af06888c50aff34ddab2741-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,692 INFO [StoreOpener-d29ad1038af06888c50aff34ddab2741-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d29ad1038af06888c50aff34ddab2741 columnFamilyName info 2024-11-07T12:54:36,692 DEBUG [StoreOpener-d29ad1038af06888c50aff34ddab2741-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:54:36,693 INFO [StoreOpener-d29ad1038af06888c50aff34ddab2741-1 {}] regionserver.HStore(327): Store=d29ad1038af06888c50aff34ddab2741/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:54:36,693 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,694 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,694 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,694 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,694 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,697 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,703 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:54:36,704 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d29ad1038af06888c50aff34ddab2741; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773742, jitterRate=-0.016137078404426575}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:54:36,704 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:36,705 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d29ad1038af06888c50aff34ddab2741: Running coprocessor pre-open hook at 1730984076683Writing region info on filesystem at 1730984076683Initializing all the Stores at 1730984076686 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984076686Cleaning up temporary data from old regions at 1730984076694 (+8 ms)Running coprocessor post-open hooks at 1730984076704 (+10 ms)Region opened successfully at 1730984076705 (+1 ms) 2024-11-07T12:54:36,706 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., pid=6, masterSystemTime=1730984076677 2024-11-07T12:54:36,709 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 
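[editor note] The "Opened d29ad1038af06888c50aff34ddab2741" line above shows the region running a SteppingSplitPolicy layered over IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy, with a jittered desiredMaxFileSize derived from the tiny MAX_FILESIZE this test configured. A sketch of the two usual ways a split policy is chosen; the configuration key and descriptor method below are the standard ones but should be treated as assumptions to check against the running version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SplitPolicySketch {
  public static void main(String[] args) {
    // Cluster-wide default split policy.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");

    // Or per table, which overrides the cluster default for that table only.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setRegionSplitPolicyClassName(
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
        .build();
    System.out.println(td.getRegionSplitPolicyClassName());
  }
}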
2024-11-07T12:54:36,709 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:54:36,710 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d29ad1038af06888c50aff34ddab2741, regionState=OPEN, openSeqNum=2, regionLocation=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:36,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 because future has completed 2024-11-07T12:54:36,726 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:54:36,726 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 in 200 msec 2024-11-07T12:54:36,737 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:54:36,737 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d29ad1038af06888c50aff34ddab2741, ASSIGN in 364 msec 2024-11-07T12:54:36,739 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:54:36,739 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730984076739"}]},"ts":"1730984076739"} 2024-11-07T12:54:36,742 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-07T12:54:36,743 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:54:36,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 935 msec 2024-11-07T12:54:36,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:36,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:37,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:37,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:38,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:38,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:39,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:39,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:40,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-07T12:54:40,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-07T12:54:40,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-07T12:54:40,436 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T12:54:40,436 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-07T12:54:40,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:40,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:41,155 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:54:41,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:54:41,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:41,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:42,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:42,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:43,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:43,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:44,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:44,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:45,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41575 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-07T12:54:45,904 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-07T12:54:45,904 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-07T12:54:45,908 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-07T12:54:45,908 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:54:45,911 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2] 2024-11-07T12:54:45,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:45,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d29ad1038af06888c50aff34ddab2741 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:54:45,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/8dacdfc7127d487ab0e1bdd3b73d0497 is 1080, key is row0001/info:/1730984085912/Put/seqid=0 2024-11-07T12:54:45,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-07T12:54:45,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47136 deadline: 1730984095972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:54:45,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:45,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:45,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741837_1013 (size=12509) 2024-11-07T12:54:45,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/8dacdfc7127d487ab0e1bdd3b73d0497 2024-11-07T12:54:45,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741837_1013 (size=12509) 2024-11-07T12:54:45,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/8dacdfc7127d487ab0e1bdd3b73d0497 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/8dacdfc7127d487ab0e1bdd3b73d0497 2024-11-07T12:54:45,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/8dacdfc7127d487ab0e1bdd3b73d0497, entries=7, sequenceid=11, filesize=12.2 K 2024-11-07T12:54:45,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for d29ad1038af06888c50aff34ddab2741 in 75ms, sequenceid=11, compaction requested=false 2024-11-07T12:54:45,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d29ad1038af06888c50aff34ddab2741: 2024-11-07T12:54:46,005 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:54:46,005 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] 
client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:54:46,005 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2 because the exception is null or not the one we care about 2024-11-07T12:54:46,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:46,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:47,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:47,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:48,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:48,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:49,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:49,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:50,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:50,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:51,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:51,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:52,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:52,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:53,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:53,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:54,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:54,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:55,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:55,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:56,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:56,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d29ad1038af06888c50aff34ddab2741 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-07T12:54:56,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/e9ff8e8e15be4f7e9447e00f247e7e30 is 1080, key is row0008/info:/1730984085925/Put/seqid=0 2024-11-07T12:54:56,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741838_1014 (size=29761) 2024-11-07T12:54:56,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741838_1014 (size=29761) 2024-11-07T12:54:56,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/e9ff8e8e15be4f7e9447e00f247e7e30 2024-11-07T12:54:56,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/e9ff8e8e15be4f7e9447e00f247e7e30 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/e9ff8e8e15be4f7e9447e00f247e7e30 2024-11-07T12:54:56,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/e9ff8e8e15be4f7e9447e00f247e7e30, entries=23, sequenceid=37, filesize=29.1 K 2024-11-07T12:54:56,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for d29ad1038af06888c50aff34ddab2741 in 22ms, sequenceid=37, compaction requested=false 2024-11-07T12:54:56,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d29ad1038af06888c50aff34ddab2741: 2024-11-07T12:54:56,036 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-07T12:54:56,036 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:54:56,036 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/e9ff8e8e15be4f7e9447e00f247e7e30 because midkey is the same as first or last row 2024-11-07T12:54:56,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:56,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:57,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:57,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:54:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:58,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d29ad1038af06888c50aff34ddab2741 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:54:58,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/2ef7fd6d06cd499c8f5c18e3e043f11e is 1080, key is row0031/info:/1730984096015/Put/seqid=0 2024-11-07T12:54:58,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741839_1015 (size=12509) 2024-11-07T12:54:58,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741839_1015 (size=12509) 2024-11-07T12:54:58,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/2ef7fd6d06cd499c8f5c18e3e043f11e 2024-11-07T12:54:58,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/2ef7fd6d06cd499c8f5c18e3e043f11e as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/2ef7fd6d06cd499c8f5c18e3e043f11e 2024-11-07T12:54:58,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/2ef7fd6d06cd499c8f5c18e3e043f11e, entries=7, sequenceid=47, filesize=12.2 K 2024-11-07T12:54:58,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for d29ad1038af06888c50aff34ddab2741 in 47ms, sequenceid=47, compaction requested=true 2024-11-07T12:54:58,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d29ad1038af06888c50aff34ddab2741: 2024-11-07T12:54:58,078 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-07T12:54:58,078 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:54:58,078 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/e9ff8e8e15be4f7e9447e00f247e7e30 because midkey is the same as first or last row 2024-11-07T12:54:58,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on d29ad1038af06888c50aff34ddab2741 2024-11-07T12:54:58,088 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d29ad1038af06888c50aff34ddab2741:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:54:58,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:54:58,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d29ad1038af06888c50aff34ddab2741 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-07T12:54:58,089 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:54:58,090 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:54:58,091 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1541): d29ad1038af06888c50aff34ddab2741/info is initiating minor compaction (all files) 2024-11-07T12:54:58,091 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d29ad1038af06888c50aff34ddab2741/info in TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:54:58,091 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/8dacdfc7127d487ab0e1bdd3b73d0497, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/e9ff8e8e15be4f7e9447e00f247e7e30, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/2ef7fd6d06cd499c8f5c18e3e043f11e] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp, totalSize=53.5 K 2024-11-07T12:54:58,092 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8dacdfc7127d487ab0e1bdd3b73d0497, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1730984085912 2024-11-07T12:54:58,093 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting e9ff8e8e15be4f7e9447e00f247e7e30, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1730984085925 2024-11-07T12:54:58,094 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2ef7fd6d06cd499c8f5c18e3e043f11e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1730984096015 2024-11-07T12:54:58,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/631d66c42f244ec0a0e688d49bd5dcb2 is 1080, key is row0038/info:/1730984098035/Put/seqid=0 
2024-11-07T12:54:58,156 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d29ad1038af06888c50aff34ddab2741#info#compaction#59 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:54:58,157 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/5d0a53d1819f424293f4d2eda1a43d75 is 1080, key is row0001/info:/1730984085912/Put/seqid=0 2024-11-07T12:54:58,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741840_1016 (size=22222) 2024-11-07T12:54:58,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741840_1016 (size=22222) 2024-11-07T12:54:58,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/631d66c42f244ec0a0e688d49bd5dcb2 2024-11-07T12:54:58,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/631d66c42f244ec0a0e688d49bd5dcb2 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/631d66c42f244ec0a0e688d49bd5dcb2 2024-11-07T12:54:58,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/631d66c42f244ec0a0e688d49bd5dcb2, entries=16, sequenceid=66, filesize=21.7 K 2024-11-07T12:54:58,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=11.56 KB/11836 for d29ad1038af06888c50aff34ddab2741 in 134ms, sequenceid=66, compaction requested=false 2024-11-07T12:54:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d29ad1038af06888c50aff34ddab2741: 2024-11-07T12:54:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=75.2 K, sizeToCheck=16.0 K 2024-11-07T12:54:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:54:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/e9ff8e8e15be4f7e9447e00f247e7e30 because midkey is the same as first or last row 2024-11-07T12:54:58,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741841_1017 (size=44978) 2024-11-07T12:54:58,241 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741841_1017 (size=44978) 2024-11-07T12:54:58,255 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/5d0a53d1819f424293f4d2eda1a43d75 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/5d0a53d1819f424293f4d2eda1a43d75 2024-11-07T12:54:58,270 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d29ad1038af06888c50aff34ddab2741/info of d29ad1038af06888c50aff34ddab2741 into 5d0a53d1819f424293f4d2eda1a43d75(size=43.9 K), total size for store is 65.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:54:58,270 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d29ad1038af06888c50aff34ddab2741: 2024-11-07T12:54:58,270 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., storeName=d29ad1038af06888c50aff34ddab2741/info, priority=13, startTime=1730984098078; duration=0sec 2024-11-07T12:54:58,270 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/5d0a53d1819f424293f4d2eda1a43d75 because midkey is the same as first or last row 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/5d0a53d1819f424293f4d2eda1a43d75 because midkey is the same as first or last row 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/5d0a53d1819f424293f4d2eda1a43d75 because midkey is the same as first or last row 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:54:58,271 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d29ad1038af06888c50aff34ddab2741:info 2024-11-07T12:54:58,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:58,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:59,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:54:59,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on d29ad1038af06888c50aff34ddab2741 2024-11-07T12:55:00,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d29ad1038af06888c50aff34ddab2741 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-07T12:55:00,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/3f7319cebbb94485b53847f20de7a9a8 is 1080, key is row0054/info:/1730984098090/Put/seqid=0 2024-11-07T12:55:00,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741842_1018 (size=17894) 2024-11-07T12:55:00,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741842_1018 (size=17894) 2024-11-07T12:55:00,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/3f7319cebbb94485b53847f20de7a9a8 2024-11-07T12:55:00,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/3f7319cebbb94485b53847f20de7a9a8 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/3f7319cebbb94485b53847f20de7a9a8 2024-11-07T12:55:00,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-07T12:55:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47136 deadline: 1730984110171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:55:00,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/3f7319cebbb94485b53847f20de7a9a8, entries=12, sequenceid=82, filesize=17.5 K 2024-11-07T12:55:00,173 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:55:00,173 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:55:00,173 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2 because the exception is null or not the one we care about 2024-11-07T12:55:00,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for d29ad1038af06888c50aff34ddab2741 in 58ms, sequenceid=82, compaction requested=true 2024-11-07T12:55:00,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d29ad1038af06888c50aff34ddab2741: 2024-11-07T12:55:00,174 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-07T12:55:00,174 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:55:00,174 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/5d0a53d1819f424293f4d2eda1a43d75 because midkey is the same as first or last row 2024-11-07T12:55:00,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d29ad1038af06888c50aff34ddab2741:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:55:00,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:55:00,174 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:55:00,176 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:55:00,176 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1541): d29ad1038af06888c50aff34ddab2741/info is initiating minor compaction (all files) 2024-11-07T12:55:00,176 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d29ad1038af06888c50aff34ddab2741/info in TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 
2024-11-07T12:55:00,176 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/5d0a53d1819f424293f4d2eda1a43d75, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/631d66c42f244ec0a0e688d49bd5dcb2, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/3f7319cebbb94485b53847f20de7a9a8] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp, totalSize=83.1 K 2024-11-07T12:55:00,177 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5d0a53d1819f424293f4d2eda1a43d75, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1730984085912 2024-11-07T12:55:00,177 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 631d66c42f244ec0a0e688d49bd5dcb2, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1730984098035 2024-11-07T12:55:00,178 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3f7319cebbb94485b53847f20de7a9a8, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1730984098090 2024-11-07T12:55:00,223 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d29ad1038af06888c50aff34ddab2741#info#compaction#61 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:55:00,224 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/f90a3e7564904f1ea3c27d6b96794bbf is 1080, key is row0001/info:/1730984085912/Put/seqid=0 2024-11-07T12:55:00,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741843_1019 (size=75378) 2024-11-07T12:55:00,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741843_1019 (size=75378) 2024-11-07T12:55:00,264 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/f90a3e7564904f1ea3c27d6b96794bbf as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/f90a3e7564904f1ea3c27d6b96794bbf 2024-11-07T12:55:00,272 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d29ad1038af06888c50aff34ddab2741/info of d29ad1038af06888c50aff34ddab2741 into f90a3e7564904f1ea3c27d6b96794bbf(size=73.6 K), total size for store is 73.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:55:00,272 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d29ad1038af06888c50aff34ddab2741: 2024-11-07T12:55:00,272 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., storeName=d29ad1038af06888c50aff34ddab2741/info, priority=13, startTime=1730984100174; duration=0sec 2024-11-07T12:55:00,272 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-07T12:55:00,272 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:55:00,272 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-07T12:55:00,272 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:55:00,272 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-07T12:55:00,272 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-07T12:55:00,281 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:55:00,281 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:55:00,281 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d29ad1038af06888c50aff34ddab2741:info 2024-11-07T12:55:00,283 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41575 {}] assignment.AssignmentManager(1363): Split request from db9ad1cb6cf9,45177,1730984073836, parent={ENCODED => d29ad1038af06888c50aff34ddab2741, NAME => 'TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-07T12:55:00,289 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41575 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:55:00,293 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41575 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d29ad1038af06888c50aff34ddab2741, daughterA=3003995566c0f4b3d16fdd754e25e4c8, daughterB=b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:00,295 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d29ad1038af06888c50aff34ddab2741, daughterA=3003995566c0f4b3d16fdd754e25e4c8, daughterB=b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:00,295 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d29ad1038af06888c50aff34ddab2741, daughterA=3003995566c0f4b3d16fdd754e25e4c8, daughterB=b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:00,295 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d29ad1038af06888c50aff34ddab2741, daughterA=3003995566c0f4b3d16fdd754e25e4c8, daughterB=b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:00,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d29ad1038af06888c50aff34ddab2741, UNASSIGN}] 2024-11-07T12:55:00,308 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d29ad1038af06888c50aff34ddab2741, UNASSIGN 2024-11-07T12:55:00,309 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d29ad1038af06888c50aff34ddab2741, regionState=CLOSING, regionLocation=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:55:00,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d29ad1038af06888c50aff34ddab2741, UNASSIGN because future has completed 2024-11-07T12:55:00,331 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-07T12:55:00,332 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836}] 2024-11-07T12:55:00,492 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close d29ad1038af06888c50aff34ddab2741 2024-11-07T12:55:00,492 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-07T12:55:00,492 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing d29ad1038af06888c50aff34ddab2741, disabling compactions & flushes 2024-11-07T12:55:00,493 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:55:00,493 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:55:00,493 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. after waiting 0 ms 2024-11-07T12:55:00,493 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 
2024-11-07T12:55:00,493 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing d29ad1038af06888c50aff34ddab2741 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-07T12:55:00,498 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/95a0df22c55b44bd951ebfd96c7b2bb9 is 1080, key is row0066/info:/1730984100118/Put/seqid=0 2024-11-07T12:55:00,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741844_1020 (size=23299) 2024-11-07T12:55:00,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741844_1020 (size=23299) 2024-11-07T12:55:00,520 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/95a0df22c55b44bd951ebfd96c7b2bb9 2024-11-07T12:55:00,528 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/.tmp/info/95a0df22c55b44bd951ebfd96c7b2bb9 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/95a0df22c55b44bd951ebfd96c7b2bb9 2024-11-07T12:55:00,544 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/95a0df22c55b44bd951ebfd96c7b2bb9, entries=17, sequenceid=103, filesize=22.8 K 2024-11-07T12:55:00,545 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=0 B/0 for d29ad1038af06888c50aff34ddab2741 in 52ms, sequenceid=103, compaction requested=false 2024-11-07T12:55:00,547 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/8dacdfc7127d487ab0e1bdd3b73d0497, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/e9ff8e8e15be4f7e9447e00f247e7e30, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/5d0a53d1819f424293f4d2eda1a43d75, 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/2ef7fd6d06cd499c8f5c18e3e043f11e, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/631d66c42f244ec0a0e688d49bd5dcb2, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/3f7319cebbb94485b53847f20de7a9a8] to archive 2024-11-07T12:55:00,548 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T12:55:00,550 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/8dacdfc7127d487ab0e1bdd3b73d0497 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/8dacdfc7127d487ab0e1bdd3b73d0497 2024-11-07T12:55:00,552 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/e9ff8e8e15be4f7e9447e00f247e7e30 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/e9ff8e8e15be4f7e9447e00f247e7e30 2024-11-07T12:55:00,553 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/5d0a53d1819f424293f4d2eda1a43d75 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/5d0a53d1819f424293f4d2eda1a43d75 2024-11-07T12:55:00,555 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/2ef7fd6d06cd499c8f5c18e3e043f11e to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/2ef7fd6d06cd499c8f5c18e3e043f11e 2024-11-07T12:55:00,556 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/631d66c42f244ec0a0e688d49bd5dcb2 to 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/631d66c42f244ec0a0e688d49bd5dcb2 2024-11-07T12:55:00,557 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/3f7319cebbb94485b53847f20de7a9a8 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/3f7319cebbb94485b53847f20de7a9a8 2024-11-07T12:55:00,565 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/recovered.edits/106.seqid, newMaxSeqId=106, maxSeqId=1 2024-11-07T12:55:00,565 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 2024-11-07T12:55:00,566 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for d29ad1038af06888c50aff34ddab2741: Waiting for close lock at 1730984100492Running coprocessor pre-close hooks at 1730984100492Disabling compacts and flushes for region at 1730984100492Disabling writes for close at 1730984100493 (+1 ms)Obtaining lock to block concurrent updates at 1730984100493Preparing flush snapshotting stores in d29ad1038af06888c50aff34ddab2741 at 1730984100493Finished memstore snapshotting TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., syncing WAL and waiting on mvcc, flushsize=dataSize=18292, getHeapSize=19824, getOffHeapSize=0, getCellsCount=17 at 1730984100493Flushing stores of TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 
at 1730984100494 (+1 ms)Flushing d29ad1038af06888c50aff34ddab2741/info: creating writer at 1730984100494Flushing d29ad1038af06888c50aff34ddab2741/info: appending metadata at 1730984100498 (+4 ms)Flushing d29ad1038af06888c50aff34ddab2741/info: closing flushed file at 1730984100498Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58ffc74d: reopening flushed file at 1730984100527 (+29 ms)Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=0 B/0 for d29ad1038af06888c50aff34ddab2741 in 52ms, sequenceid=103, compaction requested=false at 1730984100545 (+18 ms)Writing region close event to WAL at 1730984100561 (+16 ms)Running coprocessor post-close hooks at 1730984100565 (+4 ms)Closed at 1730984100565 2024-11-07T12:55:00,568 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed d29ad1038af06888c50aff34ddab2741 2024-11-07T12:55:00,569 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d29ad1038af06888c50aff34ddab2741, regionState=CLOSED 2024-11-07T12:55:00,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 because future has completed 2024-11-07T12:55:00,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-07T12:55:00,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure d29ad1038af06888c50aff34ddab2741, server=db9ad1cb6cf9,45177,1730984073836 in 242 msec 2024-11-07T12:55:00,577 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-07T12:55:00,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d29ad1038af06888c50aff34ddab2741, UNASSIGN in 269 msec 2024-11-07T12:55:00,592 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:55:00,603 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=d29ad1038af06888c50aff34ddab2741, threads=2 2024-11-07T12:55:00,605 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/95a0df22c55b44bd951ebfd96c7b2bb9 for region: d29ad1038af06888c50aff34ddab2741 2024-11-07T12:55:00,605 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/f90a3e7564904f1ea3c27d6b96794bbf for region: d29ad1038af06888c50aff34ddab2741 2024-11-07T12:55:00,618 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/95a0df22c55b44bd951ebfd96c7b2bb9, top=true 2024-11-07T12:55:00,637 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/TestLogRolling-testLogRolling=d29ad1038af06888c50aff34ddab2741-95a0df22c55b44bd951ebfd96c7b2bb9 for child: b8213e392b91eb8b27278f2821f7f941, parent: d29ad1038af06888c50aff34ddab2741 2024-11-07T12:55:00,638 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/95a0df22c55b44bd951ebfd96c7b2bb9 for region: d29ad1038af06888c50aff34ddab2741 2024-11-07T12:55:00,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741845_1021 (size=27) 2024-11-07T12:55:00,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741845_1021 (size=27) 2024-11-07T12:55:00,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741846_1022 (size=27) 2024-11-07T12:55:00,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741846_1022 (size=27) 2024-11-07T12:55:00,656 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/f90a3e7564904f1ea3c27d6b96794bbf for region: d29ad1038af06888c50aff34ddab2741 2024-11-07T12:55:00,659 DEBUG [PEWorker-1 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region d29ad1038af06888c50aff34ddab2741 Daughter A: [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741] storefiles, Daughter B: [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/TestLogRolling-testLogRolling=d29ad1038af06888c50aff34ddab2741-95a0df22c55b44bd951ebfd96c7b2bb9, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741] storefiles. 
2024-11-07T12:55:00,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741847_1023 (size=71) 2024-11-07T12:55:00,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741847_1023 (size=71) 2024-11-07T12:55:00,710 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:55:00,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741848_1024 (size=71) 2024-11-07T12:55:00,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741848_1024 (size=71) 2024-11-07T12:55:00,754 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:55:00,770 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/recovered.edits/106.seqid, newMaxSeqId=106, maxSeqId=-1 2024-11-07T12:55:00,774 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/recovered.edits/106.seqid, newMaxSeqId=106, maxSeqId=-1 2024-11-07T12:55:00,778 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1730984100777"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1730984100777"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1730984100777"}]},"ts":"1730984100777"} 2024-11-07T12:55:00,778 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1730984100777"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730984100777"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1730984100777"}]},"ts":"1730984100777"} 2024-11-07T12:55:00,778 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1730984100777"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730984100777"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1730984100777"}]},"ts":"1730984100777"} 2024-11-07T12:55:00,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3003995566c0f4b3d16fdd754e25e4c8, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=b8213e392b91eb8b27278f2821f7f941, ASSIGN}] 2024-11-07T12:55:00,808 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3003995566c0f4b3d16fdd754e25e4c8, ASSIGN 2024-11-07T12:55:00,809 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b8213e392b91eb8b27278f2821f7f941, ASSIGN 2024-11-07T12:55:00,809 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3003995566c0f4b3d16fdd754e25e4c8, ASSIGN; state=SPLITTING_NEW, location=db9ad1cb6cf9,45177,1730984073836; forceNewPlan=false, retain=false 2024-11-07T12:55:00,810 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b8213e392b91eb8b27278f2821f7f941, ASSIGN; state=SPLITTING_NEW, location=db9ad1cb6cf9,45177,1730984073836; forceNewPlan=false, retain=false 2024-11-07T12:55:00,961 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=b8213e392b91eb8b27278f2821f7f941, regionState=OPENING, regionLocation=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:55:00,961 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3003995566c0f4b3d16fdd754e25e4c8, regionState=OPENING, regionLocation=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:55:00,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b8213e392b91eb8b27278f2821f7f941, ASSIGN because future has completed 2024-11-07T12:55:00,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836}] 2024-11-07T12:55:00,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3003995566c0f4b3d16fdd754e25e4c8, ASSIGN because future has completed 2024-11-07T12:55:00,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3003995566c0f4b3d16fdd754e25e4c8, server=db9ad1cb6cf9,45177,1730984073836}] 2024-11-07T12:55:00,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:00,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:01,124 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 2024-11-07T12:55:01,124 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 3003995566c0f4b3d16fdd754e25e4c8, NAME => 'TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-07T12:55:01,124 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,124 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:55:01,124 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,124 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,126 INFO [StoreOpener-3003995566c0f4b3d16fdd754e25e4c8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,127 INFO [StoreOpener-3003995566c0f4b3d16fdd754e25e4c8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3003995566c0f4b3d16fdd754e25e4c8 columnFamilyName info 2024-11-07T12:55:01,127 DEBUG [StoreOpener-3003995566c0f4b3d16fdd754e25e4c8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:55:01,141 DEBUG [StoreOpener-3003995566c0f4b3d16fdd754e25e4c8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741->hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/f90a3e7564904f1ea3c27d6b96794bbf-bottom 2024-11-07T12:55:01,141 INFO [StoreOpener-3003995566c0f4b3d16fdd754e25e4c8-1 {}] regionserver.HStore(327): Store=3003995566c0f4b3d16fdd754e25e4c8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:55:01,142 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,142 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,144 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,144 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,144 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,146 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,147 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 3003995566c0f4b3d16fdd754e25e4c8; next sequenceid=107; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696623, jitterRate=-0.11419929563999176}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:55:01,147 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:55:01,147 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 3003995566c0f4b3d16fdd754e25e4c8: Running coprocessor pre-open hook at 1730984101125Writing region info on filesystem at 1730984101125Initializing all the Stores at 1730984101125Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984101126 (+1 ms)Cleaning up temporary data from old regions at 1730984101144 (+18 ms)Running coprocessor post-open hooks at 1730984101147 (+3 ms)Region opened successfully at 1730984101147 2024-11-07T12:55:01,149 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8., pid=13, masterSystemTime=1730984101119 2024-11-07T12:55:01,149 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-07T12:55:01,149 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 3003995566c0f4b3d16fdd754e25e4c8:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:55:01,149 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:55:01,150 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 2024-11-07T12:55:01,150 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1541): 3003995566c0f4b3d16fdd754e25e4c8/info is initiating minor compaction (all files) 2024-11-07T12:55:01,150 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3003995566c0f4b3d16fdd754e25e4c8/info in TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 2024-11-07T12:55:01,150 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741->hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/f90a3e7564904f1ea3c27d6b96794bbf-bottom] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/.tmp, totalSize=73.6 K 2024-11-07T12:55:01,151 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1730984085912 2024-11-07T12:55:01,152 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 
2024-11-07T12:55:01,152 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 2024-11-07T12:55:01,152 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 2024-11-07T12:55:01,153 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => b8213e392b91eb8b27278f2821f7f941, NAME => 'TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-07T12:55:01,153 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3003995566c0f4b3d16fdd754e25e4c8, regionState=OPEN, openSeqNum=107, regionLocation=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:55:01,153 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,153 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:55:01,153 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,153 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,155 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-07T12:55:01,155 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-07T12:55:01,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-07T12:55:01,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3003995566c0f4b3d16fdd754e25e4c8, server=db9ad1cb6cf9,45177,1730984073836 because future has completed 2024-11-07T12:55:01,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-07T12:55:01,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 3003995566c0f4b3d16fdd754e25e4c8, server=db9ad1cb6cf9,45177,1730984073836 in 189 msec 2024-11-07T12:55:01,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3003995566c0f4b3d16fdd754e25e4c8, ASSIGN in 355 msec 2024-11-07T12:55:01,173 INFO [StoreOpener-b8213e392b91eb8b27278f2821f7f941-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,177 INFO [StoreOpener-b8213e392b91eb8b27278f2821f7f941-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b8213e392b91eb8b27278f2821f7f941 columnFamilyName info 2024-11-07T12:55:01,177 DEBUG [StoreOpener-b8213e392b91eb8b27278f2821f7f941-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:55:01,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/info/4bc07d62318f460380783d2400621d70 is 193, key is TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941./info:regioninfo/1730984100960/Put/seqid=0 2024-11-07T12:55:01,202 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3003995566c0f4b3d16fdd754e25e4c8#info#compaction#64 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:55:01,204 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/.tmp/info/dac9de8d92764f6a8924f45a031a7ee2 is 1080, key is row0001/info:/1730984085912/Put/seqid=0 2024-11-07T12:55:01,207 DEBUG [StoreOpener-b8213e392b91eb8b27278f2821f7f941-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/TestLogRolling-testLogRolling=d29ad1038af06888c50aff34ddab2741-95a0df22c55b44bd951ebfd96c7b2bb9 2024-11-07T12:55:01,215 DEBUG [StoreOpener-b8213e392b91eb8b27278f2821f7f941-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741->hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/f90a3e7564904f1ea3c27d6b96794bbf-top 2024-11-07T12:55:01,215 INFO [StoreOpener-b8213e392b91eb8b27278f2821f7f941-1 {}] regionserver.HStore(327): Store=b8213e392b91eb8b27278f2821f7f941/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:55:01,215 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,216 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,218 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,219 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,219 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,222 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741850_1026 (size=70862) 2024-11-07T12:55:01,224 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened b8213e392b91eb8b27278f2821f7f941; next sequenceid=107; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721412, jitterRate=-0.08267728984355927}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:55:01,224 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:01,224 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for b8213e392b91eb8b27278f2821f7f941: Running coprocessor pre-open hook at 1730984101153Writing region info on filesystem at 1730984101153Initializing all the Stores at 1730984101157 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984101157Cleaning up temporary data from old regions at 1730984101219 (+62 ms)Running coprocessor post-open hooks at 1730984101224 (+5 ms)Region opened successfully at 1730984101224 2024-11-07T12:55:01,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741850_1026 (size=70862) 2024-11-07T12:55:01,225 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., pid=12, masterSystemTime=1730984101119 2024-11-07T12:55:01,225 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store b8213e392b91eb8b27278f2821f7f941:info, priority=-2147483648, current under compaction store size is 2 2024-11-07T12:55:01,225 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:55:01,225 DEBUG [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-07T12:55:01,227 INFO [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 2024-11-07T12:55:01,227 DEBUG [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.HStore(1541): b8213e392b91eb8b27278f2821f7f941/info is initiating minor compaction (all files) 2024-11-07T12:55:01,227 INFO [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b8213e392b91eb8b27278f2821f7f941/info in TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 
2024-11-07T12:55:01,227 INFO [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741->hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/f90a3e7564904f1ea3c27d6b96794bbf-top, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/TestLogRolling-testLogRolling=d29ad1038af06888c50aff34ddab2741-95a0df22c55b44bd951ebfd96c7b2bb9] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp, totalSize=96.4 K 2024-11-07T12:55:01,228 DEBUG [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] compactions.Compactor(225): Compacting f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1730984085912 2024-11-07T12:55:01,228 DEBUG [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=d29ad1038af06888c50aff34ddab2741-95a0df22c55b44bd951ebfd96c7b2bb9, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1730984100118 2024-11-07T12:55:01,230 DEBUG [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 2024-11-07T12:55:01,230 INFO [RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 
2024-11-07T12:55:01,230 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=b8213e392b91eb8b27278f2821f7f941, regionState=OPEN, openSeqNum=107, regionLocation=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:55:01,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 because future has completed 2024-11-07T12:55:01,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741849_1025 (size=9847) 2024-11-07T12:55:01,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741849_1025 (size=9847) 2024-11-07T12:55:01,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/info/4bc07d62318f460380783d2400621d70 2024-11-07T12:55:01,252 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/.tmp/info/dac9de8d92764f6a8924f45a031a7ee2 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/info/dac9de8d92764f6a8924f45a031a7ee2 2024-11-07T12:55:01,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-07T12:55:01,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 in 288 msec 2024-11-07T12:55:01,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-07T12:55:01,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b8213e392b91eb8b27278f2821f7f941, ASSIGN in 457 msec 2024-11-07T12:55:01,267 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 3003995566c0f4b3d16fdd754e25e4c8/info of 3003995566c0f4b3d16fdd754e25e4c8 into dac9de8d92764f6a8924f45a031a7ee2(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T12:55:01,267 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3003995566c0f4b3d16fdd754e25e4c8: 2024-11-07T12:55:01,267 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8., storeName=3003995566c0f4b3d16fdd754e25e4c8/info, priority=15, startTime=1730984101149; duration=0sec 2024-11-07T12:55:01,267 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:55:01,267 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3003995566c0f4b3d16fdd754e25e4c8:info 2024-11-07T12:55:01,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d29ad1038af06888c50aff34ddab2741, daughterA=3003995566c0f4b3d16fdd754e25e4c8, daughterB=b8213e392b91eb8b27278f2821f7f941 in 977 msec 2024-11-07T12:55:01,273 INFO [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b8213e392b91eb8b27278f2821f7f941#info#compaction#65 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:55:01,274 DEBUG [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/1b1c21893ee544379fe6d1951dff157a is 1080, key is row0062/info:/1730984098106/Put/seqid=0 2024-11-07T12:55:01,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/ns/dfe10829b7434db495c46d100dc4c685 is 43, key is default/ns:d/1730984075697/Put/seqid=0 2024-11-07T12:55:01,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741851_1027 (size=27773) 2024-11-07T12:55:01,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741851_1027 (size=27773) 2024-11-07T12:55:01,315 DEBUG [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/1b1c21893ee544379fe6d1951dff157a as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1b1c21893ee544379fe6d1951dff157a 2024-11-07T12:55:01,324 INFO [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in b8213e392b91eb8b27278f2821f7f941/info of b8213e392b91eb8b27278f2821f7f941 into 1b1c21893ee544379fe6d1951dff157a(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T12:55:01,324 DEBUG [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:55:01,324 INFO [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., storeName=b8213e392b91eb8b27278f2821f7f941/info, priority=14, startTime=1730984101225; duration=0sec 2024-11-07T12:55:01,324 DEBUG [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:55:01,325 DEBUG [RS:0;db9ad1cb6cf9:45177-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b8213e392b91eb8b27278f2821f7f941:info 2024-11-07T12:55:01,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741852_1028 (size=5153) 2024-11-07T12:55:01,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741852_1028 (size=5153) 2024-11-07T12:55:01,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/ns/dfe10829b7434db495c46d100dc4c685 2024-11-07T12:55:01,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/table/b645df09d6bd422eae9e60913654354e is 65, key is TestLogRolling-testLogRolling/table:state/1730984076739/Put/seqid=0 2024-11-07T12:55:01,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741853_1029 (size=5340) 2024-11-07T12:55:01,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741853_1029 (size=5340) 2024-11-07T12:55:01,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/table/b645df09d6bd422eae9e60913654354e 2024-11-07T12:55:01,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/info/4bc07d62318f460380783d2400621d70 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/info/4bc07d62318f460380783d2400621d70 2024-11-07T12:55:01,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/info/4bc07d62318f460380783d2400621d70, entries=30, sequenceid=17, filesize=9.6 K 2024-11-07T12:55:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/ns/dfe10829b7434db495c46d100dc4c685 as 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/ns/dfe10829b7434db495c46d100dc4c685 2024-11-07T12:55:01,435 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/ns/dfe10829b7434db495c46d100dc4c685, entries=2, sequenceid=17, filesize=5.0 K 2024-11-07T12:55:01,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/table/b645df09d6bd422eae9e60913654354e as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/table/b645df09d6bd422eae9e60913654354e 2024-11-07T12:55:01,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/table/b645df09d6bd422eae9e60913654354e, entries=2, sequenceid=17, filesize=5.2 K 2024-11-07T12:55:01,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 300ms, sequenceid=17, compaction requested=false 2024-11-07T12:55:01,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-07T12:55:01,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:01,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:02,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:02,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:03,529 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:55:03,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:03,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:04,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:04,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:05,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,568 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,568 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:05,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:05,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:06,116 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:55:06,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,160 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:06,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:06,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:07,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:07,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:08,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:08,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:09,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:09,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:10,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47136 deadline: 1730984120213, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. is not online on db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:55:10,214 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. 
is not online on db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:55:10,214 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741. is not online on db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:55:10,214 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1730984075808.d29ad1038af06888c50aff34ddab2741., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=2 from cache 2024-11-07T12:55:10,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:10,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:11,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:11,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:12,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:12,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:13,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:13,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:14,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:14,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:15,715 INFO [master/db9ad1cb6cf9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-07T12:55:15,715 INFO [master/db9ad1cb6cf9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-07T12:55:15,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:15,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:17,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:17,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:18,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:18,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:19,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:19,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:20,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:20,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:20,653 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-07T12:55:21,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:21,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:22,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:22,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:23,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:23,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:24,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:24,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:25,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:25,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:26,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:26,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:27,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:27,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:28,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:28,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:29,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:29,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:30,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:30,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:30,350 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0083', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107] 2024-11-07T12:55:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:30,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:55:30,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/8aa7aee2ffd44a8095cd52961ab39155 is 1080, key is row0083/info:/1730984130352/Put/seqid=0 2024-11-07T12:55:30,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741854_1030 (size=12509) 2024-11-07T12:55:30,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741854_1030 (size=12509) 2024-11-07T12:55:30,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/8aa7aee2ffd44a8095cd52961ab39155 2024-11-07T12:55:30,402 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/8aa7aee2ffd44a8095cd52961ab39155 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8aa7aee2ffd44a8095cd52961ab39155 2024-11-07T12:55:30,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8aa7aee2ffd44a8095cd52961ab39155, entries=7, sequenceid=117, filesize=12.2 K 2024-11-07T12:55:30,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=7.36 KB/7532 for b8213e392b91eb8b27278f2821f7f941 in 43ms, sequenceid=117, compaction requested=false 2024-11-07T12:55:30,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:55:31,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:31,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:32,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:32,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:32,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:32,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-07T12:55:32,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/8981f5434eec4f1f822d12d1a35012b0 is 1080, key is row0090/info:/1730984130368/Put/seqid=0 2024-11-07T12:55:32,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741855_1031 (size=13586) 2024-11-07T12:55:32,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741855_1031 (size=13586) 2024-11-07T12:55:32,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/8981f5434eec4f1f822d12d1a35012b0 2024-11-07T12:55:32,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/8981f5434eec4f1f822d12d1a35012b0 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8981f5434eec4f1f822d12d1a35012b0 2024-11-07T12:55:32,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8981f5434eec4f1f822d12d1a35012b0, entries=8, sequenceid=128, filesize=13.3 K 2024-11-07T12:55:32,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=13.66 KB/13988 for b8213e392b91eb8b27278f2821f7f941 in 40ms, sequenceid=128, compaction requested=true 2024-11-07T12:55:32,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:55:32,428 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:55:32,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b8213e392b91eb8b27278f2821f7f941:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:55:32,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:55:32,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:55:32,435 DEBUG 
[RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53868 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:55:32,435 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1541): b8213e392b91eb8b27278f2821f7f941/info is initiating minor compaction (all files) 2024-11-07T12:55:32,436 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b8213e392b91eb8b27278f2821f7f941/info in TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 2024-11-07T12:55:32,436 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1b1c21893ee544379fe6d1951dff157a, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8aa7aee2ffd44a8095cd52961ab39155, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8981f5434eec4f1f822d12d1a35012b0] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp, totalSize=52.6 K 2024-11-07T12:55:32,436 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b1c21893ee544379fe6d1951dff157a, keycount=21, bloomtype=ROW, size=27.1 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1730984098106 2024-11-07T12:55:32,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-07T12:55:32,437 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8aa7aee2ffd44a8095cd52961ab39155, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1730984130352 2024-11-07T12:55:32,437 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8981f5434eec4f1f822d12d1a35012b0, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1730984130368 2024-11-07T12:55:32,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/f31041f0517a4d88bbb094eec9deecdb is 1080, key is row0098/info:/1730984132389/Put/seqid=0 2024-11-07T12:55:32,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741856_1032 (size=23316) 2024-11-07T12:55:32,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741856_1032 (size=23316) 2024-11-07T12:55:32,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=148 (bloomFilter=true), 
to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/f31041f0517a4d88bbb094eec9deecdb 2024-11-07T12:55:32,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-07T12:55:32,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47136 deadline: 1730984142466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:55:32,467 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 , the old value is region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:55:32,467 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:55:32,467 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 because the exception is null or not the one we care about 2024-11-07T12:55:32,478 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b8213e392b91eb8b27278f2821f7f941#info#compaction#71 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:55:32,479 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/cfc42833125b4d3caf7d8b078259aef8 is 1080, key is row0062/info:/1730984098106/Put/seqid=0 2024-11-07T12:55:32,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/f31041f0517a4d88bbb094eec9deecdb as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f31041f0517a4d88bbb094eec9deecdb 2024-11-07T12:55:32,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f31041f0517a4d88bbb094eec9deecdb, entries=17, sequenceid=148, filesize=22.8 K 2024-11-07T12:55:32,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for b8213e392b91eb8b27278f2821f7f941 in 59ms, sequenceid=148, compaction requested=false 2024-11-07T12:55:32,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:55:32,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741857_1033 (size=44066) 2024-11-07T12:55:32,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741857_1033 (size=44066) 2024-11-07T12:55:32,507 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/cfc42833125b4d3caf7d8b078259aef8 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/cfc42833125b4d3caf7d8b078259aef8 2024-11-07T12:55:32,515 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b8213e392b91eb8b27278f2821f7f941/info of b8213e392b91eb8b27278f2821f7f941 into cfc42833125b4d3caf7d8b078259aef8(size=43.0 K), total size for store is 65.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T12:55:32,515 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:55:32,516 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., storeName=b8213e392b91eb8b27278f2821f7f941/info, priority=13, startTime=1730984132428; duration=0sec 2024-11-07T12:55:32,516 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:55:32,516 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b8213e392b91eb8b27278f2821f7f941:info 2024-11-07T12:55:33,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:33,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:33,529 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:55:34,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:34,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:35,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:35,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:36,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:36,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:37,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:37,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:38,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:38,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:39,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:39,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:40,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:40,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:40,436 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=60, reuseRatio=86.96% 2024-11-07T12:55:40,436 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-07T12:55:41,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:55:41,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:42,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:42,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-07T12:55:42,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941
2024-11-07T12:55:42,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-11-07T12:55:42,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/5c8ac443269a4551889189448cc2b5ae is 1080, key is row0115/info:/1730984132438/Put/seqid=0
2024-11-07T12:55:42,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741858_1034 (size=19000)
2024-11-07T12:55:42,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741858_1034 (size=19000)
2024-11-07T12:55:42,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/5c8ac443269a4551889189448cc2b5ae
2024-11-07T12:55:42,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/5c8ac443269a4551889189448cc2b5ae as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/5c8ac443269a4551889189448cc2b5ae
2024-11-07T12:55:42,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/5c8ac443269a4551889189448cc2b5ae, entries=13, sequenceid=165, filesize=18.6 K
2024-11-07T12:55:42,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for b8213e392b91eb8b27278f2821f7f941 in 39ms, sequenceid=165, compaction requested=true
2024-11-07T12:55:42,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941:
2024-11-07T12:55:42,584 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T12:55:42,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b8213e392b91eb8b27278f2821f7f941:info, priority=-2147483648, current under compaction store size is 1
2024-11-07T12:55:42,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T12:55:42,585 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 86382 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-07T12:55:42,585 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1541): b8213e392b91eb8b27278f2821f7f941/info is initiating minor compaction (all files)
2024-11-07T12:55:42,585 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b8213e392b91eb8b27278f2821f7f941/info in TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.
2024-11-07T12:55:42,585 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/cfc42833125b4d3caf7d8b078259aef8, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f31041f0517a4d88bbb094eec9deecdb, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/5c8ac443269a4551889189448cc2b5ae] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp, totalSize=84.4 K
2024-11-07T12:55:42,586 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting cfc42833125b4d3caf7d8b078259aef8, keycount=36, bloomtype=ROW, size=43.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1730984098106
2024-11-07T12:55:42,586 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting f31041f0517a4d88bbb094eec9deecdb, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=148, earliestPutTs=1730984132389
2024-11-07T12:55:42,586 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c8ac443269a4551889189448cc2b5ae, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1730984132438
2024-11-07T12:55:42,599 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b8213e392b91eb8b27278f2821f7f941#info#compaction#73 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-07T12:55:42,600 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/168f82d5f9254dc38e22108401a05724 is 1080, key is row0062/info:/1730984098106/Put/seqid=0
2024-11-07T12:55:42,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741859_1035 (size=76649)
2024-11-07T12:55:42,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741859_1035 (size=76649)
2024-11-07T12:55:42,615 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/168f82d5f9254dc38e22108401a05724 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/168f82d5f9254dc38e22108401a05724
2024-11-07T12:55:42,623 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b8213e392b91eb8b27278f2821f7f941/info of b8213e392b91eb8b27278f2821f7f941 into 168f82d5f9254dc38e22108401a05724(size=74.9 K), total size for store is 74.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-07T12:55:42,623 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b8213e392b91eb8b27278f2821f7f941:
2024-11-07T12:55:42,624 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., storeName=b8213e392b91eb8b27278f2821f7f941/info, priority=13, startTime=1730984142584; duration=0sec
2024-11-07T12:55:42,624 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T12:55:42,624 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b8213e392b91eb8b27278f2821f7f941:info
2024-11-07T12:55:43,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:43,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:44,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:44,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:55:44,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941
2024-11-07T12:55:44,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-07T12:55:44,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/1826526a8a564065a8f4bc1544b70044 is 1080, key is row0128/info:/1730984142546/Put/seqid=0
2024-11-07T12:55:44,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-07T12:55:44,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47136 deadline: 1730984154629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836
2024-11-07T12:55:44,631 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 , the old value is region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-07T12:55:44,631 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-07T12:55:44,631 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 because the exception is null or not the one we care about
2024-11-07T12:55:44,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741860_1036 (size=12516)
2024-11-07T12:55:44,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741860_1036 (size=12516)
2024-11-07T12:55:44,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/1826526a8a564065a8f4bc1544b70044
2024-11-07T12:55:44,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/1826526a8a564065a8f4bc1544b70044 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1826526a8a564065a8f4bc1544b70044
2024-11-07T12:55:44,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1826526a8a564065a8f4bc1544b70044, entries=7, sequenceid=176, filesize=12.2 K
2024-11-07T12:55:44,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b8213e392b91eb8b27278f2821f7f941 in 104ms, sequenceid=176, compaction requested=false
2024-11-07T12:55:44,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941:
2024-11-07T12:55:45,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:45,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:46,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:46,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:46,124 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3003995566c0f4b3d16fdd754e25e4c8, had cached 0 bytes from a total of 70862 2024-11-07T12:55:46,153 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b8213e392b91eb8b27278f2821f7f941, had cached 0 bytes from a total of 89165 2024-11-07T12:55:47,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:47,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:47,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,034 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,541 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-07T12:55:47,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,543 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,543 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:47,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-07T12:55:48,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:48,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:49,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:49,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:50,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:50,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:51,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:51,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:52,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:52,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:53,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:53,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:54,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:54,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:55:54,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941
2024-11-07T12:55:54,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-07T12:55:54,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/b0c228d9acca4877a293d0e1ea6d0ad3 is 1080, key is row0135/info:/1730984144564/Put/seqid=0
2024-11-07T12:55:54,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741861_1037 (size=29784)
2024-11-07T12:55:54,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741861_1037 (size=29784)
2024-11-07T12:55:54,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/b0c228d9acca4877a293d0e1ea6d0ad3
2024-11-07T12:55:54,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/b0c228d9acca4877a293d0e1ea6d0ad3 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b0c228d9acca4877a293d0e1ea6d0ad3
2024-11-07T12:55:54,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b0c228d9acca4877a293d0e1ea6d0ad3, entries=23, sequenceid=202, filesize=29.1 K
2024-11-07T12:55:54,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for b8213e392b91eb8b27278f2821f7f941 in 22ms, sequenceid=202, compaction requested=true
2024-11-07T12:55:54,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941:
2024-11-07T12:55:54,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b8213e392b91eb8b27278f2821f7f941:info, priority=-2147483648, current under compaction store size is 1
2024-11-07T12:55:54,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T12:55:54,666 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-07T12:55:54,667 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118949 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-07T12:55:54,667 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1541): b8213e392b91eb8b27278f2821f7f941/info is initiating minor compaction (all files)
2024-11-07T12:55:54,667 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b8213e392b91eb8b27278f2821f7f941/info in TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.
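The SortedCompactionPolicy/ExploringCompactionPolicy records above show the store picking its three eligible files for a minor compaction based on a size-ratio test. The following is a simplified sketch of that kind of ratio-based selection, not HBase's actual ExploringCompactionPolicy code; the 1.2 ratio, the minimum of 3 files, the class name RatioSelectionSketch, and the file sizes in main are illustrative assumptions.

    import java.util.ArrayList;
    import java.util.List;

    // Simplified, hypothetical sketch of ratio-based compaction file selection.
    // It keeps the longest contiguous run of store files in which no single file
    // is more than `ratio` times the combined size of the other files in the run.
    public class RatioSelectionSketch {

        static List<Long> select(List<Long> sizes, double ratio, int minFiles) {
            List<Long> best = new ArrayList<>();
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= sizes.size(); end++) {
                    List<Long> candidate = sizes.subList(start, end);
                    long total = candidate.stream().mapToLong(Long::longValue).sum();
                    boolean inRatio = candidate.stream()
                            .allMatch(s -> s <= ratio * (total - s));
                    if (inRatio && candidate.size() > best.size()) {
                        best = new ArrayList<>(candidate);
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // Made-up store-file sizes in bytes, not the ones from the log above.
            List<Long> sizes = List.of(40_000L, 30_000L, 20_000L);
            System.out.println(select(sizes, 1.2, 3));  // prints [40000, 30000, 20000]
        }
    }

The surrounding log lines also record the pieces this sketch leaves out: the blocking-file count in the SortedCompactionPolicy line and the PressureAwareThroughputController throttling during the compaction itself.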
2024-11-07T12:55:54,667 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/168f82d5f9254dc38e22108401a05724, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1826526a8a564065a8f4bc1544b70044, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b0c228d9acca4877a293d0e1ea6d0ad3] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp, totalSize=116.2 K
2024-11-07T12:55:54,668 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 168f82d5f9254dc38e22108401a05724, keycount=66, bloomtype=ROW, size=74.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1730984098106
2024-11-07T12:55:54,668 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1826526a8a564065a8f4bc1544b70044, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1730984142546
2024-11-07T12:55:54,668 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting b0c228d9acca4877a293d0e1ea6d0ad3, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1730984144564
2024-11-07T12:55:54,678 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b8213e392b91eb8b27278f2821f7f941#info#compaction#76 average throughput is 98.51 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-07T12:55:54,679 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/b31423b917774f3e899c0a19cacd80b3 is 1080, key is row0062/info:/1730984098106/Put/seqid=0
2024-11-07T12:55:54,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741862_1038 (size=109099)
2024-11-07T12:55:54,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741862_1038 (size=109099)
2024-11-07T12:55:54,687 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/b31423b917774f3e899c0a19cacd80b3 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b31423b917774f3e899c0a19cacd80b3
2024-11-07T12:55:54,691 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b8213e392b91eb8b27278f2821f7f941/info of b8213e392b91eb8b27278f2821f7f941 into b31423b917774f3e899c0a19cacd80b3(size=106.5 K), total size for store is 106.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-07T12:55:54,692 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b8213e392b91eb8b27278f2821f7f941:
2024-11-07T12:55:54,692 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., storeName=b8213e392b91eb8b27278f2821f7f941/info, priority=13, startTime=1730984154666; duration=0sec
2024-11-07T12:55:54,692 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-07T12:55:54,692 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b8213e392b91eb8b27278f2821f7f941:info
2024-11-07T12:55:55,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:55,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:56,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:56,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-07T12:55:56,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941
2024-11-07T12:55:56,659 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-07T12:55:56,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/82a86c1a55c6458d8aeef4266b570b73 is 1080, key is row0158/info:/1730984154646/Put/seqid=0
2024-11-07T12:55:56,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741863_1039 (size=12516)
2024-11-07T12:55:56,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741863_1039 (size=12516)
2024-11-07T12:55:56,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/82a86c1a55c6458d8aeef4266b570b73
2024-11-07T12:55:56,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-07T12:55:56,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47136 deadline: 1730984166714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836
2024-11-07T12:55:56,715 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 , the old value is region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-07T12:55:56,715 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-07T12:55:56,715 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 because the exception is null or not the one we care about
2024-11-07T12:55:56,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/82a86c1a55c6458d8aeef4266b570b73 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/82a86c1a55c6458d8aeef4266b570b73
2024-11-07T12:55:56,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/82a86c1a55c6458d8aeef4266b570b73, entries=7, sequenceid=213, filesize=12.2 K
2024-11-07T12:55:56,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b8213e392b91eb8b27278f2821f7f941 in 68ms, sequenceid=213, compaction requested=false
2024-11-07T12:55:56,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941:
2024-11-07T12:55:57,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:57,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:58,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:58,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:59,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:55:59,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:00,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:00,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:01,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:01,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:02,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:02,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:03,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:03,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:03,529 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-07T12:56:04,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:04,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:05,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:05,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:56:05,654 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-07T12:56:06,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:06,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:06,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:56:06,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-07T12:56:06,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/a95457632ea549868f6934e670e7bda6 is 1080, key is row0165/info:/1730984156660/Put/seqid=0 2024-11-07T12:56:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741864_1040 (size=29784) 2024-11-07T12:56:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741864_1040 (size=29784) 2024-11-07T12:56:06,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/a95457632ea549868f6934e670e7bda6 2024-11-07T12:56:06,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/a95457632ea549868f6934e670e7bda6 as 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a95457632ea549868f6934e670e7bda6 2024-11-07T12:56:06,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a95457632ea549868f6934e670e7bda6, entries=23, sequenceid=239, filesize=29.1 K 2024-11-07T12:56:06,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for b8213e392b91eb8b27278f2821f7f941 in 32ms, sequenceid=239, compaction requested=true 2024-11-07T12:56:06,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:06,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b8213e392b91eb8b27278f2821f7f941:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:56:06,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:56:06,807 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:56:06,808 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 151399 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:56:06,808 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1541): b8213e392b91eb8b27278f2821f7f941/info is initiating minor compaction (all files) 2024-11-07T12:56:06,808 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b8213e392b91eb8b27278f2821f7f941/info in TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 
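The DEBUG lines just above show the flush handing off to compaction: three eligible HFiles (roughly 106.5 K, 12.2 K and 29.1 K) are chosen for a minor compaction once the exploring policy finds a permutation of store files that satisfies its size-ratio rule ("selected 3 files ... starting at candidate #0 ... with 1 in ratio"). The following is a minimal, self-contained Java sketch of that kind of ratio-based window selection, written under a simplified assumption (every file in the window may be at most ratio times the combined size of the other files in the window); the class name, method names, the ratio value of 3.0 and the rounded byte sizes are illustrative only, and this is not HBase's actual ExploringCompactionPolicy implementation.

    import java.util.List;

    // Simplified sketch of ratio-based minor-compaction file selection.
    // Hypothetical names; not HBase's ExploringCompactionPolicy.
    public final class RatioCompactionSketch {

      /** True if every file in the window is no larger than
       *  ratio * (sum of the other files in the window). */
      static boolean withinRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      /** Picks the contiguous window (oldest files first) with the most files
       *  that satisfies the ratio rule and has at least minFiles members;
       *  returns {startIndex, fileCount}, or null if nothing qualifies. */
      static int[] select(List<Long> fileSizes, int minFiles, double ratio) {
        int[] best = null;
        for (int start = 0; start < fileSizes.size(); start++) {
          for (int end = start + minFiles; end <= fileSizes.size(); end++) {
            List<Long> window = fileSizes.subList(start, end);
            if (withinRatio(window, ratio)
                && (best == null || window.size() > best[1])) {
              best = new int[] { start, window.size() };
            }
          }
        }
        return best;
      }

      public static void main(String[] args) {
        // Approximate sizes (bytes) of the three eligible files in the log:
        // 106.5 K, 12.2 K and 29.1 K.
        List<Long> sizes = List.of(109_056L, 12_493L, 29_784L);
        int[] pick = select(sizes, 3, 3.0);
        System.out.println(pick == null
            ? "no compaction"
            : pick[1] + " files starting at #" + pick[0]);
      }
    }

With these sizes the window only passes for a sufficiently permissive ratio (the largest file dominates the other two), which is why the demo uses an arbitrary ratio of 3.0; the actual ratio in the test run is not given by the log.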
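The repeated "Failed invocation ... Filesystem closed" WARN entries before and after this point come from WAL lease recovery: RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed through reflection (hence the InvocationTargetException wrapper in the traces), and because the DFSClient behind that FileSystem instance has already been shut down, every probe fails with java.io.IOException: Filesystem closed and the Close-WAL-Writer thread keeps retrying about once per second. Below is a minimal sketch of that polling pattern, calling the HDFS client API directly rather than reflectively; the helper name waitForLeaseRecovery, the endpoint hdfs://localhost:8020 and the WAL path are made up for illustration and the retry cadence is only modeled on the one-second spacing of the WARN timestamps.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Minimal sketch of lease-recovery polling against HDFS. Hypothetical names;
    // HBase's RecoverLeaseFSUtils does this via reflection for compatibility.
    public final class LeaseRecoverySketch {

      static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path walFile,
          long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          try {
            // Ask the NameNode to recover the lease; true means the file is closed.
            if (dfs.recoverLease(walFile) || dfs.isFileClosed(walFile)) {
              return true;
            }
          } catch (java.io.IOException e) {
            // If the underlying DFSClient has been shut down, every call fails
            // with "Filesystem closed" and the loop just keeps logging, as in
            // the surrounding WARN entries.
            System.err.println("Failed invocation for " + walFile + ": " + e);
          }
          Thread.sleep(1000L); // retry roughly once per second
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        // Hypothetical mini-cluster endpoint and WAL path, for illustration only.
        DistributedFileSystem dfs = (DistributedFileSystem) org.apache.hadoop.fs.FileSystem
            .get(URI.create("hdfs://localhost:8020"), new Configuration());
        Path wal = new Path("/hbase/WALs/example-regionserver/example.wal");
        System.out.println("recovered=" + waitForLeaseRecovery(dfs, wal, 30_000L));
      }
    }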
2024-11-07T12:56:06,809 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b31423b917774f3e899c0a19cacd80b3, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/82a86c1a55c6458d8aeef4266b570b73, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a95457632ea549868f6934e670e7bda6] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp, totalSize=147.9 K 2024-11-07T12:56:06,809 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting b31423b917774f3e899c0a19cacd80b3, keycount=96, bloomtype=ROW, size=106.5 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1730984098106 2024-11-07T12:56:06,809 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 82a86c1a55c6458d8aeef4266b570b73, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1730984154646 2024-11-07T12:56:06,810 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting a95457632ea549868f6934e670e7bda6, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1730984156660 2024-11-07T12:56:06,822 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b8213e392b91eb8b27278f2821f7f941#info#compaction#79 average throughput is 64.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:56:06,823 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/33570004628040cabab19f1135a835bc is 1080, key is row0062/info:/1730984098106/Put/seqid=0 2024-11-07T12:56:06,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741865_1041 (size=141746) 2024-11-07T12:56:06,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741865_1041 (size=141746) 2024-11-07T12:56:06,840 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/33570004628040cabab19f1135a835bc as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/33570004628040cabab19f1135a835bc 2024-11-07T12:56:06,846 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b8213e392b91eb8b27278f2821f7f941/info of b8213e392b91eb8b27278f2821f7f941 into 33570004628040cabab19f1135a835bc(size=138.4 K), total size for store is 138.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:56:06,846 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:06,846 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., storeName=b8213e392b91eb8b27278f2821f7f941/info, priority=13, startTime=1730984166807; duration=0sec 2024-11-07T12:56:06,846 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:56:06,846 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b8213e392b91eb8b27278f2821f7f941:info 2024-11-07T12:56:07,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:07,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:08,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:08,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:56:08,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:56:08,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/324c7a7b62574d269bb7146004ad1767 is 1080, key is row0188/info:/1730984166776/Put/seqid=0 2024-11-07T12:56:08,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741866_1042 (size=12516) 2024-11-07T12:56:08,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741866_1042 (size=12516) 2024-11-07T12:56:08,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/324c7a7b62574d269bb7146004ad1767 2024-11-07T12:56:08,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/324c7a7b62574d269bb7146004ad1767 as 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/324c7a7b62574d269bb7146004ad1767 2024-11-07T12:56:08,822 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/324c7a7b62574d269bb7146004ad1767, entries=7, sequenceid=250, filesize=12.2 K 2024-11-07T12:56:08,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for b8213e392b91eb8b27278f2821f7f941 in 36ms, sequenceid=250, compaction requested=false 2024-11-07T12:56:08,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:08,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:56:08,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-07T12:56:08,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/b17f2e87011b4acf8331e62e996c7c37 is 1080, key is row0195/info:/1730984168790/Put/seqid=0 2024-11-07T12:56:08,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741867_1043 (size=23329) 2024-11-07T12:56:08,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741867_1043 (size=23329) 2024-11-07T12:56:08,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-07T12:56:08,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47136 deadline: 1730984178855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:56:08,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/b17f2e87011b4acf8331e62e996c7c37 2024-11-07T12:56:08,856 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 , the old value is region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:56:08,856 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b8213e392b91eb8b27278f2821f7f941, server=db9ad1cb6cf9,45177,1730984073836 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T12:56:08,856 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., hostname=db9ad1cb6cf9,45177,1730984073836, seqNum=107 because the exception is null or not the one we care about 2024-11-07T12:56:08,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/b17f2e87011b4acf8331e62e996c7c37 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b17f2e87011b4acf8331e62e996c7c37 2024-11-07T12:56:08,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b17f2e87011b4acf8331e62e996c7c37, entries=17, sequenceid=270, filesize=22.8 K 2024-11-07T12:56:08,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for b8213e392b91eb8b27278f2821f7f941 in 41ms, sequenceid=270, compaction requested=true 2024-11-07T12:56:08,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:08,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b8213e392b91eb8b27278f2821f7f941:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:56:08,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:56:08,868 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:56:08,869 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 177591 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:56:08,869 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1541): b8213e392b91eb8b27278f2821f7f941/info is initiating minor compaction (all files) 2024-11-07T12:56:08,869 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b8213e392b91eb8b27278f2821f7f941/info in 
TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 2024-11-07T12:56:08,869 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/33570004628040cabab19f1135a835bc, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/324c7a7b62574d269bb7146004ad1767, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b17f2e87011b4acf8331e62e996c7c37] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp, totalSize=173.4 K 2024-11-07T12:56:08,870 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 33570004628040cabab19f1135a835bc, keycount=126, bloomtype=ROW, size=138.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1730984098106 2024-11-07T12:56:08,870 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 324c7a7b62574d269bb7146004ad1767, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1730984166776 2024-11-07T12:56:08,871 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting b17f2e87011b4acf8331e62e996c7c37, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1730984168790 2024-11-07T12:56:08,888 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b8213e392b91eb8b27278f2821f7f941#info#compaction#82 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:56:08,888 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/8769293799234108acadddbfa468a95c is 1080, key is row0062/info:/1730984098106/Put/seqid=0 2024-11-07T12:56:08,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741868_1044 (size=167737) 2024-11-07T12:56:08,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741868_1044 (size=167737) 2024-11-07T12:56:08,923 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/8769293799234108acadddbfa468a95c as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8769293799234108acadddbfa468a95c 2024-11-07T12:56:08,932 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b8213e392b91eb8b27278f2821f7f941/info of b8213e392b91eb8b27278f2821f7f941 into 8769293799234108acadddbfa468a95c(size=163.8 K), total size for store is 163.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-07T12:56:08,932 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:08,932 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., storeName=b8213e392b91eb8b27278f2821f7f941/info, priority=13, startTime=1730984168868; duration=0sec 2024-11-07T12:56:08,933 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:56:08,933 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b8213e392b91eb8b27278f2821f7f941:info 2024-11-07T12:56:09,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:09,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:10,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:10,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:11,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:11,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:12,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:12,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:13,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:13,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:13,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 after 196127ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-07T12:56:13,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta after 196114ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-07T12:56:14,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:14,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:15,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:56:15,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:16,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:16,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:56:17,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:17,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:18,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:56:18,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:56:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:56:18,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-07T12:56:18,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/61a5f75bcbc94cd3a623d44e7fa17e19 is 1080, key is row0212/info:/1730984168827/Put/seqid=0 2024-11-07T12:56:18,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741869_1045 (size=19013) 2024-11-07T12:56:18,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741869_1045 (size=19013) 2024-11-07T12:56:18,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/61a5f75bcbc94cd3a623d44e7fa17e19 2024-11-07T12:56:18,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/61a5f75bcbc94cd3a623d44e7fa17e19 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/61a5f75bcbc94cd3a623d44e7fa17e19 2024-11-07T12:56:18,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/61a5f75bcbc94cd3a623d44e7fa17e19, entries=13, sequenceid=287, filesize=18.6 K 2024-11-07T12:56:18,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for b8213e392b91eb8b27278f2821f7f941 in 21ms, sequenceid=287, compaction requested=false 2024-11-07T12:56:18,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:19,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:19,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:20,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:20,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:20,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:56:20,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-07T12:56:20,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/a3dd3bb007304e309124e75526915efb is 1080, key is row0225/info:/1730984180946/Put/seqid=0 2024-11-07T12:56:20,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741870_1046 (size=12523) 2024-11-07T12:56:20,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741870_1046 (size=12523) 2024-11-07T12:56:20,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/a3dd3bb007304e309124e75526915efb 2024-11-07T12:56:20,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/a3dd3bb007304e309124e75526915efb as 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a3dd3bb007304e309124e75526915efb 2024-11-07T12:56:20,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a3dd3bb007304e309124e75526915efb, entries=7, sequenceid=297, filesize=12.2 K 2024-11-07T12:56:20,977 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for b8213e392b91eb8b27278f2821f7f941 in 22ms, sequenceid=297, compaction requested=true 2024-11-07T12:56:20,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:20,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b8213e392b91eb8b27278f2821f7f941:info, priority=-2147483648, current under compaction store size is 1 2024-11-07T12:56:20,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:56:20,978 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-07T12:56:20,979 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 199273 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-07T12:56:20,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45177 {}] regionserver.HRegion(8855): Flush requested on b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:56:20,979 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1541): b8213e392b91eb8b27278f2821f7f941/info is initiating minor compaction (all files) 2024-11-07T12:56:20,979 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b8213e392b91eb8b27278f2821f7f941/info in TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 
2024-11-07T12:56:20,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-07T12:56:20,979 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8769293799234108acadddbfa468a95c, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/61a5f75bcbc94cd3a623d44e7fa17e19, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a3dd3bb007304e309124e75526915efb] into tmpdir=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp, totalSize=194.6 K 2024-11-07T12:56:20,980 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8769293799234108acadddbfa468a95c, keycount=150, bloomtype=ROW, size=163.8 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1730984098106 2024-11-07T12:56:20,980 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting 61a5f75bcbc94cd3a623d44e7fa17e19, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1730984168827 2024-11-07T12:56:20,980 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] compactions.Compactor(225): Compacting a3dd3bb007304e309124e75526915efb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1730984180946 2024-11-07T12:56:20,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/27a8517988384172b3469dad4fc4326a is 1080, key is row0232/info:/1730984180956/Put/seqid=0 2024-11-07T12:56:20,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741871_1047 (size=22254) 2024-11-07T12:56:20,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741871_1047 (size=22254) 2024-11-07T12:56:20,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/27a8517988384172b3469dad4fc4326a 2024-11-07T12:56:20,993 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b8213e392b91eb8b27278f2821f7f941#info#compaction#86 average throughput is 58.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-07T12:56:20,994 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/6883d061c90c4392931a62a8166854e3 is 1080, key is row0062/info:/1730984098106/Put/seqid=0 2024-11-07T12:56:20,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/27a8517988384172b3469dad4fc4326a as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/27a8517988384172b3469dad4fc4326a 2024-11-07T12:56:20,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741872_1048 (size=189427) 2024-11-07T12:56:20,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741872_1048 (size=189427) 2024-11-07T12:56:21,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/27a8517988384172b3469dad4fc4326a, entries=16, sequenceid=316, filesize=21.7 K 2024-11-07T12:56:21,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for b8213e392b91eb8b27278f2821f7f941 in 22ms, sequenceid=316, compaction requested=false 2024-11-07T12:56:21,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:21,002 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/6883d061c90c4392931a62a8166854e3 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/6883d061c90c4392931a62a8166854e3 2024-11-07T12:56:21,007 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b8213e392b91eb8b27278f2821f7f941/info of b8213e392b91eb8b27278f2821f7f941 into 6883d061c90c4392931a62a8166854e3(size=185.0 K), total size for store is 206.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-07T12:56:21,007 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:21,007 INFO [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941., storeName=b8213e392b91eb8b27278f2821f7f941/info, priority=13, startTime=1730984180978; duration=0sec 2024-11-07T12:56:21,007 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-07T12:56:21,007 DEBUG [RS:0;db9ad1cb6cf9:45177-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b8213e392b91eb8b27278f2821f7f941:info 2024-11-07T12:56:21,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:56:21,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:22,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:22,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:56:22,993 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-07T12:56:22,994 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C45177%2C1730984073836.1730984182993 2024-11-07T12:56:23,010 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,010 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,011 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,011 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,011 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,011 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.1730984074597 with entries=308, filesize=306.93 KB; new WAL /user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.1730984182993 2024-11-07T12:56:23,012 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40207:40207),(127.0.0.1/127.0.0.1:45039:45039)] 2024-11-07T12:56:23,012 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.1730984074597 is not closed yet, will try archiving it next time 2024-11-07T12:56:23,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741832_1008 (size=314308) 2024-11-07T12:56:23,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741832_1008 (size=314308) 2024-11-07T12:56:23,015 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 3003995566c0f4b3d16fdd754e25e4c8: 2024-11-07T12:56:23,015 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-07T12:56:23,018 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/info/81fd8f6244ea433c89f884a6effa09b4 is 193, key is TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941./info:regioninfo/1730984101230/Put/seqid=0 2024-11-07T12:56:23,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741874_1050 (size=6223) 2024-11-07T12:56:23,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741874_1050 (size=6223) 2024-11-07T12:56:23,023 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/info/81fd8f6244ea433c89f884a6effa09b4 2024-11-07T12:56:23,027 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/.tmp/info/81fd8f6244ea433c89f884a6effa09b4 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/info/81fd8f6244ea433c89f884a6effa09b4 2024-11-07T12:56:23,032 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/info/81fd8f6244ea433c89f884a6effa09b4, entries=5, sequenceid=21, filesize=6.1 K 2024-11-07T12:56:23,033 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 17ms, sequenceid=21, compaction requested=false 2024-11-07T12:56:23,033 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-07T12:56:23,033 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing b8213e392b91eb8b27278f2821f7f941 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-07T12:56:23,036 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/6f14adab812d4b158ab51f6ef4af65a3 is 1080, key is row0248/info:/1730984180980/Put/seqid=0 2024-11-07T12:56:23,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:23,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-07T12:56:23,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741875_1051 (size=14681) 2024-11-07T12:56:23,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741875_1051 (size=14681) 2024-11-07T12:56:23,041 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/6f14adab812d4b158ab51f6ef4af65a3 2024-11-07T12:56:23,045 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/.tmp/info/6f14adab812d4b158ab51f6ef4af65a3 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/6f14adab812d4b158ab51f6ef4af65a3 2024-11-07T12:56:23,049 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/6f14adab812d4b158ab51f6ef4af65a3, entries=9, sequenceid=329, filesize=14.3 K 2024-11-07T12:56:23,050 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for b8213e392b91eb8b27278f2821f7f941 in 17ms, sequenceid=329, compaction requested=true 2024-11-07T12:56:23,050 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b8213e392b91eb8b27278f2821f7f941: 2024-11-07T12:56:23,050 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C45177%2C1730984073836.1730984183050 2024-11-07T12:56:23,054 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,055 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,055 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,055 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,055 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,055 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.1730984182993 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.1730984183050 2024-11-07T12:56:23,056 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40207:40207),(127.0.0.1/127.0.0.1:45039:45039)] 2024-11-07T12:56:23,056 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.1730984182993 is not closed yet, will try archiving it next time 2024-11-07T12:56:23,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38801 is added to blk_1073741873_1049 (size=731) 2024-11-07T12:56:23,056 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.1730984074597 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/oldWALs/db9ad1cb6cf9%2C45177%2C1730984073836.1730984074597 2024-11-07T12:56:23,057 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T12:56:23,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741873_1049 (size=731) 2024-11-07T12:56:23,061 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/WALs/db9ad1cb6cf9,45177,1730984073836/db9ad1cb6cf9%2C45177%2C1730984073836.1730984182993 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/oldWALs/db9ad1cb6cf9%2C45177%2C1730984073836.1730984182993 2024-11-07T12:56:23,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:56:23,157 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:56:23,158 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:56:23,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:23,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:23,158 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-07T12:56:23,158 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:56:23,158 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=861034659, stopped=false 2024-11-07T12:56:23,158 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db9ad1cb6cf9,41575,1730984073685 2024-11-07T12:56:23,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:56:23,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:56:23,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:23,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:23,160 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:56:23,160 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-07T12:56:23,160 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:56:23,160 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:23,161 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'db9ad1cb6cf9,45177,1730984073836' ***** 2024-11-07T12:56:23,161 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:56:23,161 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:56:23,161 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:56:23,161 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:56:23,161 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:56:23,161 INFO [RS:0;db9ad1cb6cf9:45177 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(3091): Received CLOSE for 3003995566c0f4b3d16fdd754e25e4c8 2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(3091): Received CLOSE for b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(959): stopping server db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db9ad1cb6cf9:45177. 
2024-11-07T12:56:23,162 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3003995566c0f4b3d16fdd754e25e4c8, disabling compactions & flushes 2024-11-07T12:56:23,162 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:56:23,162 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:23,162 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 2024-11-07T12:56:23,162 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 2024-11-07T12:56:23,162 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. after waiting 0 ms 2024-11-07T12:56:23,162 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-07T12:56:23,162 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:56:23,162 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741->hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/f90a3e7564904f1ea3c27d6b96794bbf-bottom] to archive 2024-11-07T12:56:23,163 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-07T12:56:23,163 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(1325): Online Regions={3003995566c0f4b3d16fdd754e25e4c8=TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8., 1588230740=hbase:meta,,1.1588230740, b8213e392b91eb8b27278f2821f7f941=TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.} 2024-11-07T12:56:23,163 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3003995566c0f4b3d16fdd754e25e4c8, b8213e392b91eb8b27278f2821f7f941 2024-11-07T12:56:23,163 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:56:23,163 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:56:23,163 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:56:23,163 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:56:23,163 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:56:23,164 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-07T12:56:23,165 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741 2024-11-07T12:56:23,166 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=db9ad1cb6cf9:41575 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-07T12:56:23,166 WARN [StoreCloser-TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-07T12:56:23,167 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-07T12:56:23,168 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:56:23,168 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:56:23,168 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730984183163Running coprocessor pre-close hooks at 1730984183163Disabling compacts and flushes for region at 1730984183163Disabling writes for close at 1730984183163Writing region close event to WAL at 1730984183164 (+1 ms)Running coprocessor post-close hooks at 1730984183168 (+4 ms)Closed at 1730984183168 2024-11-07T12:56:23,168 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:56:23,169 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/3003995566c0f4b3d16fdd754e25e4c8/recovered.edits/111.seqid, newMaxSeqId=111, maxSeqId=106 2024-11-07T12:56:23,170 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 2024-11-07T12:56:23,170 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3003995566c0f4b3d16fdd754e25e4c8: Waiting for close lock at 1730984183162Running coprocessor pre-close hooks at 1730984183162Disabling compacts and flushes for region at 1730984183162Disabling writes for close at 1730984183162Writing region close event to WAL at 1730984183166 (+4 ms)Running coprocessor post-close hooks at 1730984183170 (+4 ms)Closed at 1730984183170 2024-11-07T12:56:23,170 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1730984100289.3003995566c0f4b3d16fdd754e25e4c8. 
2024-11-07T12:56:23,170 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b8213e392b91eb8b27278f2821f7f941, disabling compactions & flushes 2024-11-07T12:56:23,170 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 2024-11-07T12:56:23,170 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 2024-11-07T12:56:23,170 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. after waiting 0 ms 2024-11-07T12:56:23,170 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 2024-11-07T12:56:23,170 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741->hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/d29ad1038af06888c50aff34ddab2741/info/f90a3e7564904f1ea3c27d6b96794bbf-top, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1b1c21893ee544379fe6d1951dff157a, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/TestLogRolling-testLogRolling=d29ad1038af06888c50aff34ddab2741-95a0df22c55b44bd951ebfd96c7b2bb9, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8aa7aee2ffd44a8095cd52961ab39155, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/cfc42833125b4d3caf7d8b078259aef8, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8981f5434eec4f1f822d12d1a35012b0, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f31041f0517a4d88bbb094eec9deecdb, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/168f82d5f9254dc38e22108401a05724, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/5c8ac443269a4551889189448cc2b5ae, 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1826526a8a564065a8f4bc1544b70044, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b31423b917774f3e899c0a19cacd80b3, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b0c228d9acca4877a293d0e1ea6d0ad3, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/82a86c1a55c6458d8aeef4266b570b73, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/33570004628040cabab19f1135a835bc, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a95457632ea549868f6934e670e7bda6, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/324c7a7b62574d269bb7146004ad1767, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8769293799234108acadddbfa468a95c, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b17f2e87011b4acf8331e62e996c7c37, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/61a5f75bcbc94cd3a623d44e7fa17e19, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a3dd3bb007304e309124e75526915efb] to archive 2024-11-07T12:56:23,171 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-07T12:56:23,173 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f90a3e7564904f1ea3c27d6b96794bbf.d29ad1038af06888c50aff34ddab2741 2024-11-07T12:56:23,174 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1b1c21893ee544379fe6d1951dff157a to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1b1c21893ee544379fe6d1951dff157a 2024-11-07T12:56:23,175 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/TestLogRolling-testLogRolling=d29ad1038af06888c50aff34ddab2741-95a0df22c55b44bd951ebfd96c7b2bb9 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/TestLogRolling-testLogRolling=d29ad1038af06888c50aff34ddab2741-95a0df22c55b44bd951ebfd96c7b2bb9 2024-11-07T12:56:23,176 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8aa7aee2ffd44a8095cd52961ab39155 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8aa7aee2ffd44a8095cd52961ab39155 2024-11-07T12:56:23,177 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/cfc42833125b4d3caf7d8b078259aef8 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/cfc42833125b4d3caf7d8b078259aef8 2024-11-07T12:56:23,178 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8981f5434eec4f1f822d12d1a35012b0 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8981f5434eec4f1f822d12d1a35012b0 2024-11-07T12:56:23,179 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f31041f0517a4d88bbb094eec9deecdb to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/f31041f0517a4d88bbb094eec9deecdb 2024-11-07T12:56:23,180 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/168f82d5f9254dc38e22108401a05724 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/168f82d5f9254dc38e22108401a05724 2024-11-07T12:56:23,181 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/5c8ac443269a4551889189448cc2b5ae to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/5c8ac443269a4551889189448cc2b5ae 2024-11-07T12:56:23,182 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1826526a8a564065a8f4bc1544b70044 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/1826526a8a564065a8f4bc1544b70044 2024-11-07T12:56:23,183 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b31423b917774f3e899c0a19cacd80b3 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b31423b917774f3e899c0a19cacd80b3 2024-11-07T12:56:23,184 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b0c228d9acca4877a293d0e1ea6d0ad3 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b0c228d9acca4877a293d0e1ea6d0ad3 2024-11-07T12:56:23,185 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/82a86c1a55c6458d8aeef4266b570b73 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/82a86c1a55c6458d8aeef4266b570b73 2024-11-07T12:56:23,186 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/33570004628040cabab19f1135a835bc to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/33570004628040cabab19f1135a835bc 2024-11-07T12:56:23,187 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a95457632ea549868f6934e670e7bda6 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a95457632ea549868f6934e670e7bda6 2024-11-07T12:56:23,188 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/324c7a7b62574d269bb7146004ad1767 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/324c7a7b62574d269bb7146004ad1767 2024-11-07T12:56:23,189 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8769293799234108acadddbfa468a95c to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/8769293799234108acadddbfa468a95c 2024-11-07T12:56:23,190 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b17f2e87011b4acf8331e62e996c7c37 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/b17f2e87011b4acf8331e62e996c7c37 2024-11-07T12:56:23,191 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/61a5f75bcbc94cd3a623d44e7fa17e19 to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/61a5f75bcbc94cd3a623d44e7fa17e19 2024-11-07T12:56:23,192 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a3dd3bb007304e309124e75526915efb to hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/archive/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/info/a3dd3bb007304e309124e75526915efb 2024-11-07T12:56:23,193 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1b1c21893ee544379fe6d1951dff157a=27773, 8aa7aee2ffd44a8095cd52961ab39155=12509, cfc42833125b4d3caf7d8b078259aef8=44066, 8981f5434eec4f1f822d12d1a35012b0=13586, f31041f0517a4d88bbb094eec9deecdb=23316, 168f82d5f9254dc38e22108401a05724=76649, 5c8ac443269a4551889189448cc2b5ae=19000, 1826526a8a564065a8f4bc1544b70044=12516, b31423b917774f3e899c0a19cacd80b3=109099, b0c228d9acca4877a293d0e1ea6d0ad3=29784, 82a86c1a55c6458d8aeef4266b570b73=12516, 33570004628040cabab19f1135a835bc=141746, a95457632ea549868f6934e670e7bda6=29784, 324c7a7b62574d269bb7146004ad1767=12516, 8769293799234108acadddbfa468a95c=167737, b17f2e87011b4acf8331e62e996c7c37=23329, 61a5f75bcbc94cd3a623d44e7fa17e19=19013, a3dd3bb007304e309124e75526915efb=12523] 2024-11-07T12:56:23,196 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/data/default/TestLogRolling-testLogRolling/b8213e392b91eb8b27278f2821f7f941/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=106 2024-11-07T12:56:23,196 INFO [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 
2024-11-07T12:56:23,196 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b8213e392b91eb8b27278f2821f7f941: Waiting for close lock at 1730984183170Running coprocessor pre-close hooks at 1730984183170Disabling compacts and flushes for region at 1730984183170Disabling writes for close at 1730984183170Writing region close event to WAL at 1730984183193 (+23 ms)Running coprocessor post-close hooks at 1730984183196 (+3 ms)Closed at 1730984183196 2024-11-07T12:56:23,196 DEBUG [RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1730984100289.b8213e392b91eb8b27278f2821f7f941. 2024-11-07T12:56:23,363 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(976): stopping server db9ad1cb6cf9,45177,1730984073836; all regions closed. 2024-11-07T12:56:23,363 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,364 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,364 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,364 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,364 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741834_1010 (size=8107) 2024-11-07T12:56:23,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741834_1010 (size=8107) 2024-11-07T12:56:23,368 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/oldWALs 2024-11-07T12:56:23,368 INFO [RS:0;db9ad1cb6cf9:45177 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C45177%2C1730984073836.meta:.meta(num 1730984075630) 2024-11-07T12:56:23,368 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,369 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,369 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,369 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,369 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741876_1052 (size=778) 2024-11-07T12:56:23,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741876_1052 (size=778) 2024-11-07T12:56:23,372 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/oldWALs 2024-11-07T12:56:23,372 INFO [RS:0;db9ad1cb6cf9:45177 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C45177%2C1730984073836:(num 1730984183050) 2024-11-07T12:56:23,372 DEBUG [RS:0;db9ad1cb6cf9:45177 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:23,372 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:56:23,372 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:56:23,373 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.ChoreService(370): Chore service for: 
regionserver/db9ad1cb6cf9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T12:56:23,373 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:56:23,373 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-07T12:56:23,373 INFO [RS:0;db9ad1cb6cf9:45177 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45177 2024-11-07T12:56:23,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9ad1cb6cf9,45177,1730984073836 2024-11-07T12:56:23,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:56:23,375 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:56:23,377 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9ad1cb6cf9,45177,1730984073836] 2024-11-07T12:56:23,378 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db9ad1cb6cf9,45177,1730984073836 already deleted, retry=false 2024-11-07T12:56:23,378 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db9ad1cb6cf9,45177,1730984073836 expired; onlineServers=0 2024-11-07T12:56:23,378 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db9ad1cb6cf9,41575,1730984073685' ***** 2024-11-07T12:56:23,378 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:56:23,378 INFO [M:0;db9ad1cb6cf9:41575 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:56:23,378 INFO [M:0;db9ad1cb6cf9:41575 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:56:23,378 DEBUG [M:0;db9ad1cb6cf9:41575 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:56:23,378 DEBUG [M:0;db9ad1cb6cf9:41575 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:56:23,378 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:56:23,378 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730984074358 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730984074358,5,FailOnTimeoutGroup] 2024-11-07T12:56:23,378 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730984074363 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730984074363,5,FailOnTimeoutGroup] 2024-11-07T12:56:23,378 INFO [M:0;db9ad1cb6cf9:41575 {}] hbase.ChoreService(370): Chore service for: master/db9ad1cb6cf9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:56:23,378 INFO [M:0;db9ad1cb6cf9:41575 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:56:23,379 DEBUG [M:0;db9ad1cb6cf9:41575 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:56:23,379 INFO [M:0;db9ad1cb6cf9:41575 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:56:23,379 INFO [M:0;db9ad1cb6cf9:41575 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:56:23,379 INFO [M:0;db9ad1cb6cf9:41575 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:56:23,379 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:56:23,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:56:23,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:23,380 DEBUG [M:0;db9ad1cb6cf9:41575 {}] zookeeper.ZKUtil(347): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:56:23,380 WARN [M:0;db9ad1cb6cf9:41575 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:56:23,380 INFO [M:0;db9ad1cb6cf9:41575 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/.lastflushedseqids 2024-11-07T12:56:23,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741877_1053 (size=228) 2024-11-07T12:56:23,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741877_1053 (size=228) 2024-11-07T12:56:23,388 INFO [M:0;db9ad1cb6cf9:41575 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:56:23,389 INFO [M:0;db9ad1cb6cf9:41575 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:56:23,389 DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:56:23,389 INFO [M:0;db9ad1cb6cf9:41575 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:56:23,389 DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:56:23,389 DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:56:23,389 DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:56:23,389 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-07T12:56:23,404 DEBUG [M:0;db9ad1cb6cf9:41575 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ffc4299e40c44bdda06de53388e2307b is 82, key is hbase:meta,,1/info:regioninfo/1730984075678/Put/seqid=0 2024-11-07T12:56:23,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741878_1054 (size=5672) 2024-11-07T12:56:23,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741878_1054 (size=5672) 2024-11-07T12:56:23,409 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ffc4299e40c44bdda06de53388e2307b 2024-11-07T12:56:23,426 DEBUG [M:0;db9ad1cb6cf9:41575 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/27e3d9c4de8a4c9cb35dcc2568a5da77 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730984076746/Put/seqid=0 2024-11-07T12:56:23,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741879_1055 (size=7091) 2024-11-07T12:56:23,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741879_1055 (size=7091) 2024-11-07T12:56:23,431 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/27e3d9c4de8a4c9cb35dcc2568a5da77 2024-11-07T12:56:23,434 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 27e3d9c4de8a4c9cb35dcc2568a5da77 2024-11-07T12:56:23,447 DEBUG [M:0;db9ad1cb6cf9:41575 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6515f9dfa1bb4d6bb7f03bfb81b5a8fa is 69, key is db9ad1cb6cf9,45177,1730984073836/rs:state/1730984074421/Put/seqid=0 
2024-11-07T12:56:23,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741880_1056 (size=5156) 2024-11-07T12:56:23,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741880_1056 (size=5156) 2024-11-07T12:56:23,452 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6515f9dfa1bb4d6bb7f03bfb81b5a8fa 2024-11-07T12:56:23,471 DEBUG [M:0;db9ad1cb6cf9:41575 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/045f057374024d64b95a8f260abef09a is 52, key is load_balancer_on/state:d/1730984075804/Put/seqid=0 2024-11-07T12:56:23,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741881_1057 (size=5056) 2024-11-07T12:56:23,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741881_1057 (size=5056) 2024-11-07T12:56:23,476 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/045f057374024d64b95a8f260abef09a 2024-11-07T12:56:23,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:56:23,477 INFO [RS:0;db9ad1cb6cf9:45177 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:56:23,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45177-0x1001a4ee4000001, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:56:23,477 INFO [RS:0;db9ad1cb6cf9:45177 {}] regionserver.HRegionServer(1031): Exiting; stopping=db9ad1cb6cf9,45177,1730984073836; zookeeper connection closed. 
2024-11-07T12:56:23,477 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@20426fad {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@20426fad 2024-11-07T12:56:23,477 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-07T12:56:23,482 DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ffc4299e40c44bdda06de53388e2307b as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ffc4299e40c44bdda06de53388e2307b 2024-11-07T12:56:23,486 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ffc4299e40c44bdda06de53388e2307b, entries=8, sequenceid=125, filesize=5.5 K 2024-11-07T12:56:23,487 DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/27e3d9c4de8a4c9cb35dcc2568a5da77 as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/27e3d9c4de8a4c9cb35dcc2568a5da77 2024-11-07T12:56:23,492 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 27e3d9c4de8a4c9cb35dcc2568a5da77 2024-11-07T12:56:23,492 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/27e3d9c4de8a4c9cb35dcc2568a5da77, entries=13, sequenceid=125, filesize=6.9 K 2024-11-07T12:56:23,493 DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6515f9dfa1bb4d6bb7f03bfb81b5a8fa as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6515f9dfa1bb4d6bb7f03bfb81b5a8fa 2024-11-07T12:56:23,497 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6515f9dfa1bb4d6bb7f03bfb81b5a8fa, entries=1, sequenceid=125, filesize=5.0 K 2024-11-07T12:56:23,498 DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/045f057374024d64b95a8f260abef09a as hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/045f057374024d64b95a8f260abef09a 2024-11-07T12:56:23,501 INFO [M:0;db9ad1cb6cf9:41575 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33769/user/jenkins/test-data/7a67ace1-2723-0a11-d864-cc8cd1fa5edc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/045f057374024d64b95a8f260abef09a, entries=1, sequenceid=125, filesize=4.9 K 2024-11-07T12:56:23,502 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=125, compaction requested=false 2024-11-07T12:56:23,504 INFO [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:56:23,504 DEBUG [M:0;db9ad1cb6cf9:41575 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730984183389Disabling compacts and flushes for region at 1730984183389Disabling writes for close at 1730984183389Obtaining lock to block concurrent updates at 1730984183389Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730984183389Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1730984183389Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730984183390 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730984183390Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730984183404 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730984183404Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730984183412 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730984183426 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730984183426Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730984183434 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730984183447 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730984183447Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730984183457 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730984183471 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730984183471Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d4a3e05: reopening flushed file at 1730984183481 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a6af44d: reopening flushed file at 1730984183487 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@554ef527: reopening flushed file at 1730984183492 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@488a0919: reopening flushed file at 1730984183497 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=125, compaction requested=false at 1730984183502 (+5 ms)Writing region close event to WAL at 1730984183504 (+2 ms)Closed at 1730984183504 2024-11-07T12:56:23,504 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,504 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,504 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,504 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,505 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:23,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44169 is added to blk_1073741830_1006 (size=61332) 2024-11-07T12:56:23,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38801 is added to blk_1073741830_1006 (size=61332) 2024-11-07T12:56:23,507 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-07T12:56:23,507 INFO [M:0;db9ad1cb6cf9:41575 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-07T12:56:23,507 INFO [M:0;db9ad1cb6cf9:41575 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41575 2024-11-07T12:56:23,507 INFO [M:0;db9ad1cb6cf9:41575 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:56:23,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:56:23,609 INFO [M:0;db9ad1cb6cf9:41575 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T12:56:23,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41575-0x1001a4ee4000000, quorum=127.0.0.1:59865, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T12:56:23,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78512cf7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:56:23,612 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b6783f5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:56:23,612 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:56:23,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d7cc900{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:56:23,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27cee48d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.log.dir/,STOPPED} 2024-11-07T12:56:23,615 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:56:23,615 WARN [BP-1024326402-172.17.0.2-1730984072159 heartbeating to localhost/127.0.0.1:33769 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:56:23,615 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:56:23,615 WARN [BP-1024326402-172.17.0.2-1730984072159 heartbeating to localhost/127.0.0.1:33769 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1024326402-172.17.0.2-1730984072159 (Datanode Uuid c14b1b0e-981e-4df1-a4d5-4a7378a03ce8) service to localhost/127.0.0.1:33769 2024-11-07T12:56:23,615 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/data/data3/current/BP-1024326402-172.17.0.2-1730984072159 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:56:23,616 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/data/data4/current/BP-1024326402-172.17.0.2-1730984072159 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:56:23,616 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:56:23,618 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3bc081d8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:56:23,619 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3aca12f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:56:23,619 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:56:23,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5411f427{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:56:23,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1091e18a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.log.dir/,STOPPED} 2024-11-07T12:56:23,620 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-07T12:56:23,620 WARN [BP-1024326402-172.17.0.2-1730984072159 heartbeating to localhost/127.0.0.1:33769 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-07T12:56:23,621 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-07T12:56:23,621 WARN [BP-1024326402-172.17.0.2-1730984072159 heartbeating to localhost/127.0.0.1:33769 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1024326402-172.17.0.2-1730984072159 (Datanode Uuid df3b3ded-3155-41ba-aa35-e05dd4eb1945) service to localhost/127.0.0.1:33769 2024-11-07T12:56:23,621 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/data/data1/current/BP-1024326402-172.17.0.2-1730984072159 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:56:23,621 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/cluster_c509f59f-2b48-e0a5-b5fb-64f4eb5de5ef/data/data2/current/BP-1024326402-172.17.0.2-1730984072159 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-07T12:56:23,621 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-07T12:56:23,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60125866{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:56:23,630 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@273a6f23{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-07T12:56:23,631 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-07T12:56:23,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38dc0fd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-07T12:56:23,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8e4b628{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.log.dir/,STOPPED} 2024-11-07T12:56:23,640 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-07T12:56:23,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-07T12:56:23,688 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=227 (was 204) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/db9ad1cb6cf9:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33769 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33769 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33769 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33769 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:33769 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33769 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33769 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33769 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33769 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=509 (was 486) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=322 (was 70) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7647 (was 8281) 2024-11-07T12:56:23,695 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=227, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=322, ProcessCount=11, AvailableMemoryMB=7647 2024-11-07T12:56:23,695 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T12:56:23,695 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.log.dir so I do NOT create it in target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791 2024-11-07T12:56:23,695 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c75638a5-c5c8-fa87-3ba7-3c0bcc36acda/hadoop.tmp.dir so I do NOT create it in target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791 2024-11-07T12:56:23,695 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9, deleteOnExit=true 2024-11-07T12:56:23,695 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/test.cache.data in system properties and HBase conf 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/hadoop.log.dir in system properties and HBase conf 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T12:56:23,696 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T12:56:23,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/nfs.dump.dir in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/java.io.tmpdir in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T12:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T12:56:23,709 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:56:23,762 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:56:23,765 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:56:23,766 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:56:23,766 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:56:23,766 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:56:23,767 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:56:23,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c411d10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:56:23,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b4c94a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:56:23,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d974bb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/java.io.tmpdir/jetty-localhost-46675-hadoop-hdfs-3_4_1-tests_jar-_-any-15021958319254692360/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T12:56:23,882 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1cb0de45{HTTP/1.1, (http/1.1)}{localhost:46675} 2024-11-07T12:56:23,882 INFO [Time-limited test {}] server.Server(415): Started @349151ms 2024-11-07T12:56:23,894 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-07T12:56:23,940 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:56:23,942 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:56:23,943 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:56:23,943 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:56:23,943 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:56:23,943 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79ca80d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:56:23,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@333ec6ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:56:24,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-07T12:56:24,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:24,056 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@259cffcc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/java.io.tmpdir/jetty-localhost-40685-hadoop-hdfs-3_4_1-tests_jar-_-any-3559455810840200359/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:56:24,056 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41f14207{HTTP/1.1, (http/1.1)}{localhost:40685} 2024-11-07T12:56:24,057 INFO [Time-limited test {}] server.Server(415): Started @349326ms 2024-11-07T12:56:24,058 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:56:24,084 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:56:24,087 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:56:24,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:56:24,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:56:24,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:56:24,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@87b2e2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:56:24,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f9a8217{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:56:24,138 WARN [Thread-2471 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/data/data1/current/BP-700663152-172.17.0.2-1730984183715/current, will proceed with Du for space computation calculation, 2024-11-07T12:56:24,138 WARN [Thread-2472 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/data/data2/current/BP-700663152-172.17.0.2-1730984183715/current, will proceed with Du for space computation calculation, 2024-11-07T12:56:24,153 WARN [Thread-2450 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:56:24,156 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5059528a24735fbb with lease ID 0xc53bf67a6aeda060: Processing first storage report for DS-fbf3490c-96c6-46da-be5e-91ec2b0524ae from datanode DatanodeRegistration(127.0.0.1:33661, datanodeUuid=2ad52c2e-7173-458e-8ebf-166a42701a32, infoPort=38319, infoSecurePort=0, ipcPort=35295, storageInfo=lv=-57;cid=testClusterID;nsid=1971567542;c=1730984183715) 2024-11-07T12:56:24,156 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5059528a24735fbb with lease ID 0xc53bf67a6aeda060: from storage DS-fbf3490c-96c6-46da-be5e-91ec2b0524ae node DatanodeRegistration(127.0.0.1:33661, datanodeUuid=2ad52c2e-7173-458e-8ebf-166a42701a32, infoPort=38319, infoSecurePort=0, ipcPort=35295, storageInfo=lv=-57;cid=testClusterID;nsid=1971567542;c=1730984183715), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:56:24,156 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5059528a24735fbb with lease ID 0xc53bf67a6aeda060: Processing first storage report for DS-17729d33-4f88-49ec-932d-bea4a085b26a from datanode DatanodeRegistration(127.0.0.1:33661, datanodeUuid=2ad52c2e-7173-458e-8ebf-166a42701a32, infoPort=38319, infoSecurePort=0, ipcPort=35295, storageInfo=lv=-57;cid=testClusterID;nsid=1971567542;c=1730984183715) 2024-11-07T12:56:24,156 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5059528a24735fbb with lease ID 0xc53bf67a6aeda060: from storage DS-17729d33-4f88-49ec-932d-bea4a085b26a node DatanodeRegistration(127.0.0.1:33661, datanodeUuid=2ad52c2e-7173-458e-8ebf-166a42701a32, infoPort=38319, infoSecurePort=0, ipcPort=35295, storageInfo=lv=-57;cid=testClusterID;nsid=1971567542;c=1730984183715), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:56:24,204 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1cfa6b2d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/java.io.tmpdir/jetty-localhost-43251-hadoop-hdfs-3_4_1-tests_jar-_-any-16050690384106029364/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:56:24,205 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32c86c86{HTTP/1.1, (http/1.1)}{localhost:43251} 2024-11-07T12:56:24,205 INFO [Time-limited test {}] server.Server(415): Started @349474ms 2024-11-07T12:56:24,206 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
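
The storage reports and datanode web contexts in this stretch of the log come from an in-process HDFS that the test harness starts with two datanodes (numDataNodes=2 in the minicluster options logged earlier). As a rough sketch only, and assuming the harness delegates to Hadoop's MiniDFSCluster test API (shipped in the hadoop-hdfs tests jar that the jetty paths above point at), a two-datanode mini DFS is typically brought up like this; the base directory and the mkdirs call are illustrative and not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // illustrative base directory; the real run uses the target/test-data paths seen in this log
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

    // two datanodes, matching numDataNodes=2 in the logged minicluster options
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();                    // block until both datanodes have registered
      FileSystem fs = cluster.getFileSystem(); // DistributedFileSystem backed by the mini cluster
      fs.mkdirs(new Path("/user/jenkins"));    // illustrative write to prove the cluster is usable
      System.out.println("namenode rpc port: " + cluster.getNameNodePort());
    } finally {
      cluster.shutdown();                      // stops the datanodes and the namenode
    }
  }
}

Shutting such a cluster down closes its DFSClient, which is consistent with the earlier "Filesystem closed" warnings where a WAL writer was still being closed while the underlying client had already gone away.
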
2024-11-07T12:56:24,284 WARN [Thread-2497 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/data/data3/current/BP-700663152-172.17.0.2-1730984183715/current, will proceed with Du for space computation calculation, 2024-11-07T12:56:24,284 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/data/data4/current/BP-700663152-172.17.0.2-1730984183715/current, will proceed with Du for space computation calculation, 2024-11-07T12:56:24,311 WARN [Thread-2486 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T12:56:24,313 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x174047ba1a3735c with lease ID 0xc53bf67a6aeda061: Processing first storage report for DS-f4b88364-dbae-48b8-82c2-3fcf5015bec4 from datanode DatanodeRegistration(127.0.0.1:46323, datanodeUuid=37f85710-5889-41a5-be8b-09b38bcc6c26, infoPort=46079, infoSecurePort=0, ipcPort=39573, storageInfo=lv=-57;cid=testClusterID;nsid=1971567542;c=1730984183715) 2024-11-07T12:56:24,313 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x174047ba1a3735c with lease ID 0xc53bf67a6aeda061: from storage DS-f4b88364-dbae-48b8-82c2-3fcf5015bec4 node DatanodeRegistration(127.0.0.1:46323, datanodeUuid=37f85710-5889-41a5-be8b-09b38bcc6c26, infoPort=46079, infoSecurePort=0, ipcPort=39573, storageInfo=lv=-57;cid=testClusterID;nsid=1971567542;c=1730984183715), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:56:24,313 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x174047ba1a3735c with lease ID 0xc53bf67a6aeda061: Processing first storage report for DS-cef7f895-99af-4173-8e9f-df28da9ff589 from datanode DatanodeRegistration(127.0.0.1:46323, datanodeUuid=37f85710-5889-41a5-be8b-09b38bcc6c26, infoPort=46079, infoSecurePort=0, ipcPort=39573, storageInfo=lv=-57;cid=testClusterID;nsid=1971567542;c=1730984183715) 2024-11-07T12:56:24,313 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x174047ba1a3735c with lease ID 0xc53bf67a6aeda061: from storage DS-cef7f895-99af-4173-8e9f-df28da9ff589 node DatanodeRegistration(127.0.0.1:46323, datanodeUuid=37f85710-5889-41a5-be8b-09b38bcc6c26, infoPort=46079, infoSecurePort=0, ipcPort=39573, storageInfo=lv=-57;cid=testClusterID;nsid=1971567542;c=1730984183715), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:56:24,327 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791 2024-11-07T12:56:24,330 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/zookeeper_0, clientPort=58894, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:56:24,331 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58894 2024-11-07T12:56:24,331 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:56:24,332 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:56:24,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:56:24,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:56:24,341 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971 with version=8 2024-11-07T12:56:24,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34313/user/jenkins/test-data/4c0bfd4d-e0b4-10e8-5273-27c98fb3d979/hbase-staging 2024-11-07T12:56:24,343 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:56:24,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:56:24,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:56:24,343 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:56:24,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:56:24,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:56:24,343 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T12:56:24,343 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:56:24,344 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37651 2024-11-07T12:56:24,345 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37651 connecting to ZooKeeper ensemble=127.0.0.1:58894 2024-11-07T12:56:24,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:376510x0, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:56:24,353 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37651-0x1001a5094a70000 connected 2024-11-07T12:56:24,373 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:56:24,375 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:56:24,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:56:24,377 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971, hbase.cluster.distributed=false 2024-11-07T12:56:24,378 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:56:24,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37651 2024-11-07T12:56:24,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37651 2024-11-07T12:56:24,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37651 2024-11-07T12:56:24,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37651 2024-11-07T12:56:24,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37651 2024-11-07T12:56:24,395 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db9ad1cb6cf9:0 server-side Connection retries=45 2024-11-07T12:56:24,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:56:24,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:56:24,395 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:56:24,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:56:24,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:56:24,395 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:56:24,395 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:56:24,396 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44351 2024-11-07T12:56:24,396 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44351 connecting to ZooKeeper ensemble=127.0.0.1:58894 2024-11-07T12:56:24,397 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:56:24,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:56:24,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:443510x0, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:56:24,402 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:443510x0, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:56:24,402 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44351-0x1001a5094a70001 connected 2024-11-07T12:56:24,403 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:56:24,403 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:56:24,403 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:56:24,404 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:56:24,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44351 2024-11-07T12:56:24,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44351 2024-11-07T12:56:24,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44351 2024-11-07T12:56:24,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44351 2024-11-07T12:56:24,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44351 2024-11-07T12:56:24,417 
DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db9ad1cb6cf9:37651 2024-11-07T12:56:24,417 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db9ad1cb6cf9,37651,1730984184343 2024-11-07T12:56:24,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:56:24,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:56:24,419 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db9ad1cb6cf9,37651,1730984184343 2024-11-07T12:56:24,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:56:24,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,420 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:56:24,421 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db9ad1cb6cf9,37651,1730984184343 from backup master directory 2024-11-07T12:56:24,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db9ad1cb6cf9,37651,1730984184343 2024-11-07T12:56:24,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:56:24,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:56:24,422 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
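
The repeated "Set watcher on znode that does not yet exist" lines are the standard ZooKeeper idiom of registering a watch through an exists() call, which succeeds whether or not the znode has been created; the watch then fires on a later NodeCreated or NodeDeleted event, the same event types visible in the ZKWatcher lines above. A minimal illustration against the plain ZooKeeper client (HBase wraps this in ZKUtil/ZKWatcher; the latch and printouts here exist only for the sketch):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ExistsWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // ensemble string mirrors the one in the log (127.0.0.1:58894); the session timeout is illustrative
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58894", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // exists() registers a watch even when /hbase/running has not been created yet;
    // the watcher fires later with NodeCreated or NodeDeleted for that path
    Stat stat = zk.exists("/hbase/running",
        event -> System.out.println("event " + event.getType() + " on " + event.getPath()));
    System.out.println("/hbase/running " + (stat == null ? "does not exist yet" : "already exists"));

    zk.close();
  }
}

ZooKeeper watches of this kind are one-shot, so long-lived clients re-register them after every notification.
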
2024-11-07T12:56:24,422 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db9ad1cb6cf9,37651,1730984184343 2024-11-07T12:56:24,425 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/hbase.id] with ID: accca5a6-62cb-4f15-a0b7-967d3bbd9b7a 2024-11-07T12:56:24,425 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/.tmp/hbase.id 2024-11-07T12:56:24,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:56:24,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:56:24,431 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/.tmp/hbase.id]:[hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/hbase.id] 2024-11-07T12:56:24,443 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:56:24,443 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T12:56:24,444 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
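
The cluster ID above is published with a write-to-temporary-then-rename idiom: the file is first written under .tmp and only then moved to its final hbase.id location, so a reader never observes a partially written file (a rename within a single HDFS filesystem is atomic). A generic sketch of that idiom with the Hadoop FileSystem API; the publish() helper, the local path and the overwrite flag are illustrative rather than the actual FSUtils code:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishIdSketch {
  // write <dir>/.tmp/<name> first, then rename it into place so readers never see a partial file
  static void publish(FileSystem fs, Path dir, String name, String content) throws IOException {
    Path tmp = new Path(new Path(dir, ".tmp"), name);
    Path dst = new Path(dir, name);
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {              // atomic within one HDFS filesystem
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // would point at the mini cluster's fs.defaultFS in a test
    FileSystem fs = FileSystem.get(conf);
    publish(fs, new Path("/tmp/publish-id-sketch"), "hbase.id",
        "accca5a6-62cb-4f15-a0b7-967d3bbd9b7a"); // the cluster ID value logged above
  }
}
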
2024-11-07T12:56:24,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:56:24,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:56:24,453 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:56:24,454 INFO [regionserver/db9ad1cb6cf9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:56:24,454 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:56:24,454 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:56:24,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:56:24,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:56:24,462 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' 
=> 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store 2024-11-07T12:56:24,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:56:24,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:56:24,467 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:56:24,467 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:56:24,467 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:56:24,467 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:56:24,467 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:56:24,467 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:56:24,468 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
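
The 'master:store' definition spelled out above is an ordinary table descriptor with four column families (info, proc, rs, state), where only 'info' departs from the defaults: three versions, in-memory, 8 KB blocks, ROW_INDEX_V1 block encoding and a ROWCOL bloom filter. For comparison, an equivalent descriptor built through the public client API would look roughly like this; the table name is a stand-in, since the real region is bootstrapped internally by MasterRegion rather than through Admin calls:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // mirrors the 'info' family above: 3 versions, in-memory, 8 KB blocks,
    // ROW_INDEX_V1 block encoding, ROWCOL bloom filter
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .build();

    // proc, rs and state keep the stock defaults (1 version, 64 KB blocks, ROW bloom filter)
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store"))
        .setColumnFamily(info)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();

    System.out.println(desc);
  }
}

The three default families in the sketch correspond to what the log prints for proc, rs and state, which all stay at one version, 64 KB block size and a ROW bloom filter.
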
2024-11-07T12:56:24,468 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730984184467Disabling compacts and flushes for region at 1730984184467Disabling writes for close at 1730984184467Writing region close event to WAL at 1730984184468 (+1 ms)Closed at 1730984184468 2024-11-07T12:56:24,468 WARN [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/.initializing 2024-11-07T12:56:24,468 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/WALs/db9ad1cb6cf9,37651,1730984184343 2024-11-07T12:56:24,471 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C37651%2C1730984184343, suffix=, logDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/WALs/db9ad1cb6cf9,37651,1730984184343, archiveDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/oldWALs, maxLogs=10 2024-11-07T12:56:24,471 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C37651%2C1730984184343.1730984184471 2024-11-07T12:56:24,475 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/WALs/db9ad1cb6cf9,37651,1730984184343/db9ad1cb6cf9%2C37651%2C1730984184343.1730984184471 2024-11-07T12:56:24,476 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46079:46079),(127.0.0.1/127.0.0.1:38319:38319)] 2024-11-07T12:56:24,479 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:56:24,479 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:56:24,479 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,479 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-07T12:56:24,482 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:24,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:56:24,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-07T12:56:24,483 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:24,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:56:24,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-07T12:56:24,485 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:24,485 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:56:24,485 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,486 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-07T12:56:24,486 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:24,486 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:56:24,486 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,487 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,487 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,488 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,488 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,489 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-07T12:56:24,490 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:56:24,491 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:56:24,492 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718899, jitterRate=-0.08587299287319183}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T12:56:24,492 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730984184480Initializing all the Stores at 1730984184480Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984184480Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984184480Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984184480Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984184480Cleaning up temporary data from old regions at 1730984184488 (+8 ms)Region opened successfully at 1730984184492 (+4 ms) 2024-11-07T12:56:24,493 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-07T12:56:24,495 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e23dfd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:56:24,496 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-07T12:56:24,496 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-07T12:56:24,496 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-07T12:56:24,496 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-07T12:56:24,496 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-07T12:56:24,497 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-07T12:56:24,497 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-07T12:56:24,499 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-07T12:56:24,500 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-07T12:56:24,501 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-07T12:56:24,501 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-07T12:56:24,502 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-07T12:56:24,503 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-07T12:56:24,504 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-07T12:56:24,505 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-07T12:56:24,506 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-07T12:56:24,507 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-07T12:56:24,508 DEBUG 
[master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-07T12:56:24,510 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-07T12:56:24,511 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-07T12:56:24,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:56:24,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-07T12:56:24,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,513 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db9ad1cb6cf9,37651,1730984184343, sessionid=0x1001a5094a70000, setting cluster-up flag (Was=false) 2024-11-07T12:56:24,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,520 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-07T12:56:24,521 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,37651,1730984184343 2024-11-07T12:56:24,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,528 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-07T12:56:24,528 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9ad1cb6cf9,37651,1730984184343 2024-11-07T12:56:24,529 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-07T12:56:24,531 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-07T12:56:24,531 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-07T12:56:24,531 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T12:56:24,531 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db9ad1cb6cf9,37651,1730984184343 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-07T12:56:24,532 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:56:24,532 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:56:24,532 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:56:24,533 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=5, maxPoolSize=5 2024-11-07T12:56:24,533 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db9ad1cb6cf9:0, corePoolSize=10, maxPoolSize=10 2024-11-07T12:56:24,533 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,533 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:56:24,533 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db9ad1cb6cf9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-07T12:56:24,536 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:56:24,536 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730984214536 2024-11-07T12:56:24,536 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-07T12:56:24,536 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-07T12:56:24,536 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-07T12:56:24,536 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-07T12:56:24,536 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-07T12:56:24,537 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-07T12:56:24,537 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-07T12:56:24,537 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-07T12:56:24,537 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:24,538 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:56:24,540 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-07T12:56:24,541 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-07T12:56:24,541 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-07T12:56:24,541 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-07T12:56:24,541 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-07T12:56:24,543 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730984184541,5,FailOnTimeoutGroup] 2024-11-07T12:56:24,543 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730984184543,5,FailOnTimeoutGroup] 2024-11-07T12:56:24,543 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,543 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-07T12:56:24,543 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,543 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:56:24,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741831_1007 (size=1321) 2024-11-07T12:56:24,548 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-07T12:56:24,548 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971 2024-11-07T12:56:24,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:56:24,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741832_1008 (size=32) 2024-11-07T12:56:24,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:56:24,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:56:24,556 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:56:24,556 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:24,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:56:24,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:56:24,558 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:56:24,558 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:24,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:56:24,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:56:24,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:56:24,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:24,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:56:24,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:56:24,560 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:56:24,560 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:24,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:56:24,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:56:24,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740 2024-11-07T12:56:24,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740 2024-11-07T12:56:24,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:56:24,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:56:24,563 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-07T12:56:24,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:56:24,565 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:56:24,566 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796141, jitterRate=0.012346282601356506}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:56:24,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730984184554Initializing all the Stores at 1730984184555 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984184555Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984184555Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984184555Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984184555Cleaning up temporary data from old regions at 1730984184563 (+8 ms)Region opened successfully at 1730984184566 (+3 ms) 2024-11-07T12:56:24,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:56:24,566 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:56:24,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:56:24,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:56:24,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:56:24,567 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:56:24,567 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730984184566Disabling compacts and flushes for region at 
1730984184566Disabling writes for close at 1730984184566Writing region close event to WAL at 1730984184567 (+1 ms)Closed at 1730984184567 2024-11-07T12:56:24,568 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:56:24,568 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-07T12:56:24,568 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-07T12:56:24,569 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:56:24,570 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-07T12:56:24,607 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(746): ClusterId : accca5a6-62cb-4f15-a0b7-967d3bbd9b7a 2024-11-07T12:56:24,607 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-07T12:56:24,609 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-07T12:56:24,609 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-07T12:56:24,611 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-07T12:56:24,611 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a1b37c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9ad1cb6cf9/172.17.0.2:0 2024-11-07T12:56:24,623 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db9ad1cb6cf9:44351 2024-11-07T12:56:24,623 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-07T12:56:24,623 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-07T12:56:24,623 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-07T12:56:24,624 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(2659): reportForDuty to master=db9ad1cb6cf9,37651,1730984184343 with port=44351, startcode=1730984184394 2024-11-07T12:56:24,624 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-07T12:56:24,626 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40617, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-07T12:56:24,626 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37651 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:24,626 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37651 {}] master.ServerManager(517): Registering regionserver=db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:24,628 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971 2024-11-07T12:56:24,628 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45175 2024-11-07T12:56:24,628 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T12:56:24,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:56:24,629 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] zookeeper.ZKUtil(111): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:24,630 WARN [RS:0;db9ad1cb6cf9:44351 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T12:56:24,630 INFO [RS:0;db9ad1cb6cf9:44351 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:56:24,630 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:24,630 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9ad1cb6cf9,44351,1730984184394] 2024-11-07T12:56:24,634 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T12:56:24,636 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T12:56:24,637 INFO [RS:0;db9ad1cb6cf9:44351 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T12:56:24,637 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-07T12:56:24,637 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T12:56:24,637 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T12:56:24,637 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9ad1cb6cf9:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:56:24,638 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9ad1cb6cf9:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:56:24,638 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-07T12:56:24,638 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,638 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,638 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,638 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,638 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,44351,1730984184394-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:56:24,652 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:56:24,652 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,44351,1730984184394-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,652 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,652 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.Replication(171): db9ad1cb6cf9,44351,1730984184394 started 2024-11-07T12:56:24,665 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:24,665 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(1482): Serving as db9ad1cb6cf9,44351,1730984184394, RpcServer on db9ad1cb6cf9/172.17.0.2:44351, sessionid=0x1001a5094a70001 2024-11-07T12:56:24,666 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:56:24,666 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:24,666 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,44351,1730984184394' 2024-11-07T12:56:24,666 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:56:24,666 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:56:24,666 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:56:24,666 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:56:24,667 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:24,667 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9ad1cb6cf9,44351,1730984184394' 2024-11-07T12:56:24,667 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:56:24,667 DEBUG 
[RS:0;db9ad1cb6cf9:44351 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:56:24,667 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:56:24,667 INFO [RS:0;db9ad1cb6cf9:44351 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:56:24,667 INFO [RS:0;db9ad1cb6cf9:44351 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:56:24,720 WARN [db9ad1cb6cf9:37651 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-07T12:56:24,769 INFO [RS:0;db9ad1cb6cf9:44351 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C44351%2C1730984184394, suffix=, logDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/db9ad1cb6cf9,44351,1730984184394, archiveDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/oldWALs, maxLogs=32 2024-11-07T12:56:24,769 INFO [RS:0;db9ad1cb6cf9:44351 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C44351%2C1730984184394.1730984184769 2024-11-07T12:56:24,774 INFO [RS:0;db9ad1cb6cf9:44351 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/db9ad1cb6cf9,44351,1730984184394/db9ad1cb6cf9%2C44351%2C1730984184394.1730984184769 2024-11-07T12:56:24,775 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46079:46079),(127.0.0.1/127.0.0.1:38319:38319)] 2024-11-07T12:56:24,970 DEBUG [db9ad1cb6cf9:37651 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-07T12:56:24,971 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:24,972 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,44351,1730984184394, state=OPENING 2024-11-07T12:56:24,973 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:56:24,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:24,975 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-07T12:56:24,975 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:56:24,975 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:56:24,975 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,44351,1730984184394}] 2024-11-07T12:56:25,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,42011,1730983938993/db9ad1cb6cf9%2C42011%2C1730983938993.meta.1730983939786.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:25,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37737/user/jenkins/test-data/2d3e126f-3ad1-eb27-3028-10db73c84201/WALs/db9ad1cb6cf9,43605,1730983939942/db9ad1cb6cf9%2C43605%2C1730983939942.1730983940156 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-07T12:56:25,128 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:56:25,129 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42645, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:56:25,133 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T12:56:25,133 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:56:25,134 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9ad1cb6cf9%2C44351%2C1730984184394.meta, suffix=.meta, logDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/db9ad1cb6cf9,44351,1730984184394, archiveDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/oldWALs, maxLogs=32 2024-11-07T12:56:25,135 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db9ad1cb6cf9%2C44351%2C1730984184394.meta.1730984185135.meta 2024-11-07T12:56:25,139 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/db9ad1cb6cf9,44351,1730984184394/db9ad1cb6cf9%2C44351%2C1730984184394.meta.1730984185135.meta 2024-11-07T12:56:25,140 DEBUG 
[RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46079:46079),(127.0.0.1/127.0.0.1:38319:38319)] 2024-11-07T12:56:25,144 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:56:25,144 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:56:25,144 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:56:25,145 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-07T12:56:25,145 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:56:25,145 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:56:25,145 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T12:56:25,145 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T12:56:25,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:56:25,147 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:56:25,147 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:25,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:56:25,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T12:56:25,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T12:56:25,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:25,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:56:25,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:56:25,149 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:56:25,149 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:25,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:56:25,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:56:25,150 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:56:25,150 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:56:25,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:56:25,150 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T12:56:25,151 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740 2024-11-07T12:56:25,152 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740 2024-11-07T12:56:25,153 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T12:56:25,153 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T12:56:25,153 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-07T12:56:25,154 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T12:56:25,155 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742194, jitterRate=-0.05625225603580475}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-07T12:56:25,155 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T12:56:25,155 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730984185145Writing region info on filesystem at 1730984185145Initializing all the Stores at 1730984185145Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984185145Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984185146 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730984185146Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730984185146Cleaning up temporary data from old regions at 1730984185153 (+7 ms)Running coprocessor post-open hooks at 1730984185155 (+2 ms)Region opened successfully at 1730984185155 2024-11-07T12:56:25,156 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730984185127 2024-11-07T12:56:25,158 DEBUG [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:56:25,158 INFO [RS_OPEN_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T12:56:25,159 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:25,159 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9ad1cb6cf9,44351,1730984184394, state=OPEN 2024-11-07T12:56:25,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:56:25,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:56:25,164 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:25,164 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:56:25,164 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:56:25,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:56:25,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db9ad1cb6cf9,44351,1730984184394 in 189 msec 2024-11-07T12:56:25,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:56:25,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 598 msec 2024-11-07T12:56:25,168 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T12:56:25,168 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T12:56:25,169 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:56:25,169 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,44351,1730984184394, seqNum=-1] 2024-11-07T12:56:25,169 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:56:25,171 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56617, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:56:25,174 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 643 msec 2024-11-07T12:56:25,175 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730984185174, completionTime=-1 2024-11-07T12:56:25,175 INFO 
[master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-07T12:56:25,175 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-07T12:56:25,176 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-07T12:56:25,176 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730984245176 2024-11-07T12:56:25,176 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730984305176 2024-11-07T12:56:25,176 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-07T12:56:25,177 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,37651,1730984184343-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:25,177 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,37651,1730984184343-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:25,177 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,37651,1730984184343-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:25,177 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db9ad1cb6cf9:37651, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:25,177 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:25,177 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:25,178 DEBUG [master/db9ad1cb6cf9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T12:56:25,180 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.758sec 2024-11-07T12:56:25,180 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:56:25,180 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:56:25,180 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:56:25,180 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-07T12:56:25,180 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:56:25,180 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,37651,1730984184343-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:56:25,180 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,37651,1730984184343-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:56:25,182 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:56:25,182 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T12:56:25,182 INFO [master/db9ad1cb6cf9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9ad1cb6cf9,37651,1730984184343-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:56:25,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73bc7eb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:56:25,207 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db9ad1cb6cf9,37651,-1 for getting cluster id 2024-11-07T12:56:25,207 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T12:56:25,208 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'accca5a6-62cb-4f15-a0b7-967d3bbd9b7a' 2024-11-07T12:56:25,209 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T12:56:25,209 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "accca5a6-62cb-4f15-a0b7-967d3bbd9b7a" 2024-11-07T12:56:25,209 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@780b7386, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:56:25,209 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db9ad1cb6cf9,37651,-1] 2024-11-07T12:56:25,209 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T12:56:25,210 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:25,210 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48966, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T12:56:25,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22ed154c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T12:56:25,211 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T12:56:25,212 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db9ad1cb6cf9,44351,1730984184394, seqNum=-1] 2024-11-07T12:56:25,212 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:56:25,213 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43122, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:56:25,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db9ad1cb6cf9,37651,1730984184343 2024-11-07T12:56:25,215 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:56:25,217 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-07T12:56:25,217 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:56:25,218 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/test.com,8080,1, archiveDir=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/oldWALs, maxLogs=32 2024-11-07T12:56:25,219 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1730984185219 2024-11-07T12:56:25,223 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/test.com,8080,1/test.com%2C8080%2C1.1730984185219 2024-11-07T12:56:25,225 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38319:38319),(127.0.0.1/127.0.0.1:46079:46079)] 2024-11-07T12:56:25,230 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1730984185230 2024-11-07T12:56:25,236 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,236 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,236 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,236 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,236 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/test.com,8080,1/test.com%2C8080%2C1.1730984185219 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/test.com,8080,1/test.com%2C8080%2C1.1730984185230 2024-11-07T12:56:25,238 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46079:46079),(127.0.0.1/127.0.0.1:38319:38319)] 2024-11-07T12:56:25,238 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/test.com,8080,1/test.com%2C8080%2C1.1730984185219 is not closed yet, will try archiving it next time 2024-11-07T12:56:25,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741835_1011 (size=93) 2024-11-07T12:56:25,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741835_1011 (size=93) 2024-11-07T12:56:25,239 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,239 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,239 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,239 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,239 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,240 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/WALs/test.com,8080,1/test.com%2C8080%2C1.1730984185219 to hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/oldWALs/test.com%2C8080%2C1.1730984185219 2024-11-07T12:56:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741836_1012 (size=93) 2024-11-07T12:56:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741836_1012 (size=93) 2024-11-07T12:56:25,244 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/oldWALs 2024-11-07T12:56:25,244 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1730984185230) 2024-11-07T12:56:25,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T12:56:25,244 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-07T12:56:25,244 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:56:25,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:25,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:25,245 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-07T12:56:25,245 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T12:56:25,245 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=406390709, stopped=false 2024-11-07T12:56:25,245 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db9ad1cb6cf9,37651,1730984184343 2024-11-07T12:56:25,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:56:25,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T12:56:25,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:25,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:25,247 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:56:25,247 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T12:56:25,247 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:56:25,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:25,247 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db9ad1cb6cf9,44351,1730984184394' ***** 2024-11-07T12:56:25,247 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T12:56:25,247 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:56:25,247 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T12:56:25,247 INFO [RS:0;db9ad1cb6cf9:44351 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T12:56:25,247 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T12:56:25,247 INFO [RS:0;db9ad1cb6cf9:44351 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T12:56:25,247 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(959): stopping server db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:25,248 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:56:25,248 INFO [RS:0;db9ad1cb6cf9:44351 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db9ad1cb6cf9:44351. 
2024-11-07T12:56:25,248 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T12:56:25,248 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:25,248 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:56:25,248 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T12:56:25,248 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T12:56:25,248 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-07T12:56:25,248 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T12:56:25,248 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-07T12:56:25,248 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-07T12:56:25,248 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-07T12:56:25,248 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T12:56:25,248 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T12:56:25,248 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T12:56:25,248 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T12:56:25,248 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T12:56:25,249 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-07T12:56:25,265 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740/.tmp/ns/871750c0bef646c0bc7ad723fab351a2 is 43, key is default/ns:d/1730984185171/Put/seqid=0 2024-11-07T12:56:25,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741837_1013 (size=5153) 2024-11-07T12:56:25,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741837_1013 (size=5153) 2024-11-07T12:56:25,271 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740/.tmp/ns/871750c0bef646c0bc7ad723fab351a2 2024-11-07T12:56:25,277 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740/.tmp/ns/871750c0bef646c0bc7ad723fab351a2 as hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740/ns/871750c0bef646c0bc7ad723fab351a2 2024-11-07T12:56:25,281 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740/ns/871750c0bef646c0bc7ad723fab351a2, entries=2, sequenceid=6, filesize=5.0 K 2024-11-07T12:56:25,282 INFO 
[RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false 2024-11-07T12:56:25,286 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-07T12:56:25,286 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T12:56:25,286 INFO [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T12:56:25,287 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730984185248Running coprocessor pre-close hooks at 1730984185248Disabling compacts and flushes for region at 1730984185248Disabling writes for close at 1730984185248Obtaining lock to block concurrent updates at 1730984185249 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1730984185249Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1730984185249Flushing stores of hbase:meta,,1.1588230740 at 1730984185249Flushing 1588230740/ns: creating writer at 1730984185250 (+1 ms)Flushing 1588230740/ns: appending metadata at 1730984185264 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1730984185264Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73569dc5: reopening flushed file at 1730984185276 (+12 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false at 1730984185282 (+6 ms)Writing region close event to WAL at 1730984185283 (+1 ms)Running coprocessor post-close hooks at 1730984185286 (+3 ms)Closed at 1730984185286 2024-11-07T12:56:25,287 DEBUG [RS_CLOSE_META-regionserver/db9ad1cb6cf9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T12:56:25,448 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(976): stopping server db9ad1cb6cf9,44351,1730984184394; all regions closed. 
2024-11-07T12:56:25,449 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,449 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,449 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,449 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,449 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741834_1010 (size=1152) 2024-11-07T12:56:25,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741834_1010 (size=1152) 2024-11-07T12:56:25,453 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/oldWALs 2024-11-07T12:56:25,453 INFO [RS:0;db9ad1cb6cf9:44351 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C44351%2C1730984184394.meta:.meta(num 1730984185135) 2024-11-07T12:56:25,454 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,454 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,454 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,454 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,454 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-07T12:56:25,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741833_1009 (size=93) 2024-11-07T12:56:25,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741833_1009 (size=93) 2024-11-07T12:56:25,458 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/oldWALs 2024-11-07T12:56:25,458 INFO [RS:0;db9ad1cb6cf9:44351 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db9ad1cb6cf9%2C44351%2C1730984184394:(num 1730984184769) 2024-11-07T12:56:25,458 DEBUG [RS:0;db9ad1cb6cf9:44351 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T12:56:25,458 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T12:56:25,458 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:56:25,458 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.ChoreService(370): Chore service for: regionserver/db9ad1cb6cf9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-07T12:56:25,459 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:56:25,459 INFO [regionserver/db9ad1cb6cf9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-07T12:56:25,459 INFO [RS:0;db9ad1cb6cf9:44351 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44351 2024-11-07T12:56:25,462 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T12:56:25,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T12:56:25,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9ad1cb6cf9,44351,1730984184394 2024-11-07T12:56:25,463 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9ad1cb6cf9,44351,1730984184394] 2024-11-07T12:56:25,464 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db9ad1cb6cf9,44351,1730984184394 already deleted, retry=false 2024-11-07T12:56:25,464 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db9ad1cb6cf9,44351,1730984184394 expired; onlineServers=0 2024-11-07T12:56:25,464 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db9ad1cb6cf9,37651,1730984184343' ***** 2024-11-07T12:56:25,464 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T12:56:25,464 INFO [M:0;db9ad1cb6cf9:37651 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T12:56:25,465 INFO [M:0;db9ad1cb6cf9:37651 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T12:56:25,465 DEBUG [M:0;db9ad1cb6cf9:37651 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T12:56:25,465 DEBUG [M:0;db9ad1cb6cf9:37651 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-07T12:56:25,465 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T12:56:25,465 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730984184541 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.large.0-1730984184541,5,FailOnTimeoutGroup] 2024-11-07T12:56:25,465 DEBUG [master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730984184543 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9ad1cb6cf9:0:becomeActiveMaster-HFileCleaner.small.0-1730984184543,5,FailOnTimeoutGroup] 2024-11-07T12:56:25,465 INFO [M:0;db9ad1cb6cf9:37651 {}] hbase.ChoreService(370): Chore service for: master/db9ad1cb6cf9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-07T12:56:25,465 INFO [M:0;db9ad1cb6cf9:37651 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T12:56:25,465 DEBUG [M:0;db9ad1cb6cf9:37651 {}] master.HMaster(1795): Stopping service threads 2024-11-07T12:56:25,465 INFO [M:0;db9ad1cb6cf9:37651 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-07T12:56:25,465 INFO [M:0;db9ad1cb6cf9:37651 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T12:56:25,465 INFO [M:0;db9ad1cb6cf9:37651 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-07T12:56:25,465 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-07T12:56:25,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-07T12:56:25,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:56:25,466 DEBUG [M:0;db9ad1cb6cf9:37651 {}] zookeeper.ZKUtil(347): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-07T12:56:25,466 WARN [M:0;db9ad1cb6cf9:37651 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-07T12:56:25,467 INFO [M:0;db9ad1cb6cf9:37651 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/.lastflushedseqids 2024-11-07T12:56:25,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741838_1014 (size=99) 2024-11-07T12:56:25,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741838_1014 (size=99) 2024-11-07T12:56:25,472 INFO [M:0;db9ad1cb6cf9:37651 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-07T12:56:25,472 INFO [M:0;db9ad1cb6cf9:37651 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-07T12:56:25,472 DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:56:25,472 INFO [M:0;db9ad1cb6cf9:37651 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T12:56:25,472 DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T12:56:25,472 DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-07T12:56:25,472 DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T12:56:25,472 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-11-07T12:56:25,494 DEBUG [M:0;db9ad1cb6cf9:37651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e8b0d2937c0c492199e2c5f3fc55018b is 82, key is hbase:meta,,1/info:regioninfo/1730984185158/Put/seqid=0
2024-11-07T12:56:25,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741839_1015 (size=5672)
2024-11-07T12:56:25,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741839_1015 (size=5672)
2024-11-07T12:56:25,499 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e8b0d2937c0c492199e2c5f3fc55018b
2024-11-07T12:56:25,516 DEBUG [M:0;db9ad1cb6cf9:37651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/44d722cf310940d19447433c00391a84 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1730984185174/Put/seqid=0
2024-11-07T12:56:25,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741840_1016 (size=5275)
2024-11-07T12:56:25,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741840_1016 (size=5275)
2024-11-07T12:56:25,521 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/44d722cf310940d19447433c00391a84
2024-11-07T12:56:25,537 DEBUG [M:0;db9ad1cb6cf9:37651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e503507094844808ae2a3da30b6aa26b is 69, key is db9ad1cb6cf9,44351,1730984184394/rs:state/1730984184626/Put/seqid=0
2024-11-07T12:56:25,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741841_1017 (size=5156)
2024-11-07T12:56:25,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741841_1017 (size=5156)
2024-11-07T12:56:25,542 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e503507094844808ae2a3da30b6aa26b
2024-11-07T12:56:25,558 DEBUG [M:0;db9ad1cb6cf9:37651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1e7308d2b71d4d2da14bebbc5bbf02bb is 52, key is load_balancer_on/state:d/1730984185216/Put/seqid=0
2024-11-07T12:56:25,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741842_1018 (size=5056)
2024-11-07T12:56:25,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741842_1018 (size=5056)
2024-11-07T12:56:25,563 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1e7308d2b71d4d2da14bebbc5bbf02bb
2024-11-07T12:56:25,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:56:25,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44351-0x1001a5094a70001, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:56:25,563 INFO [RS:0;db9ad1cb6cf9:44351 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-07T12:56:25,563 INFO [RS:0;db9ad1cb6cf9:44351 {}] regionserver.HRegionServer(1031): Exiting; stopping=db9ad1cb6cf9,44351,1730984184394; zookeeper connection closed.
2024-11-07T12:56:25,564 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7161bab1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7161bab1
2024-11-07T12:56:25,564 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-07T12:56:25,567 DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e8b0d2937c0c492199e2c5f3fc55018b as hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e8b0d2937c0c492199e2c5f3fc55018b
2024-11-07T12:56:25,570 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e8b0d2937c0c492199e2c5f3fc55018b, entries=8, sequenceid=29, filesize=5.5 K
2024-11-07T12:56:25,571 DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/44d722cf310940d19447433c00391a84 as hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/44d722cf310940d19447433c00391a84
2024-11-07T12:56:25,574 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/44d722cf310940d19447433c00391a84, entries=3, sequenceid=29, filesize=5.2 K
2024-11-07T12:56:25,575 DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e503507094844808ae2a3da30b6aa26b as hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e503507094844808ae2a3da30b6aa26b
2024-11-07T12:56:25,578 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e503507094844808ae2a3da30b6aa26b, entries=1, sequenceid=29, filesize=5.0 K
2024-11-07T12:56:25,579 DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1e7308d2b71d4d2da14bebbc5bbf02bb as hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1e7308d2b71d4d2da14bebbc5bbf02bb
2024-11-07T12:56:25,582 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45175/user/jenkins/test-data/2f5a98c8-bc50-c520-6e47-b519a221f971/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1e7308d2b71d4d2da14bebbc5bbf02bb, entries=1, sequenceid=29, filesize=4.9 K
2024-11-07T12:56:25,583 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false
2024-11-07T12:56:25,585 INFO [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T12:56:25,585 DEBUG [M:0;db9ad1cb6cf9:37651 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
  Waiting for close lock at 1730984185472
  Disabling compacts and flushes for region at 1730984185472
  Disabling writes for close at 1730984185472
  Obtaining lock to block concurrent updates at 1730984185472
  Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730984185472
  Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1730984185473 (+1 ms)
  Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730984185473
  Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730984185473
  Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730984185494 (+21 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730984185494
  Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730984185503 (+9 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730984185516 (+13 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730984185516
  Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730984185524 (+8 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730984185537 (+13 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730984185537
  Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1730984185545 (+8 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1730984185558 (+13 ms)
  Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1730984185558
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61280679: reopening flushed file at 1730984185566 (+8 ms)
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32ea3835: reopening flushed file at 1730984185570 (+4 ms)
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f457212: reopening flushed file at 1730984185575 (+5 ms)
  Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73caf0d4: reopening flushed file at 1730984185579 (+4 ms)
  Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false at 1730984185583 (+4 ms)
  Writing region close event to WAL at 1730984185585 (+2 ms)
  Closed at 1730984185585
2024-11-07T12:56:25,585 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:56:25,585 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:56:25,585 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:56:25,586 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:56:25,586 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-07T12:56:25,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33661 is added to blk_1073741830_1006 (size=10311)
2024-11-07T12:56:25,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46323 is added to blk_1073741830_1006 (size=10311)
2024-11-07T12:56:25,588 INFO [M:0;db9ad1cb6cf9:37651 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-07T12:56:25,588 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-07T12:56:25,588 INFO [M:0;db9ad1cb6cf9:37651 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37651
2024-11-07T12:56:25,589 INFO [M:0;db9ad1cb6cf9:37651 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-07T12:56:25,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:56:25,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37651-0x1001a5094a70000, quorum=127.0.0.1:58894, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:56:25,691 INFO [M:0;db9ad1cb6cf9:37651 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-07T12:56:25,693 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1cfa6b2d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:56:25,694 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32c86c86{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:56:25,694 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:56:25,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f9a8217{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:56:25,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@87b2e2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/hadoop.log.dir/,STOPPED}
2024-11-07T12:56:25,695 WARN [BP-700663152-172.17.0.2-1730984183715 heartbeating to localhost/127.0.0.1:45175 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:56:25,695 WARN [BP-700663152-172.17.0.2-1730984183715 heartbeating to localhost/127.0.0.1:45175 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-700663152-172.17.0.2-1730984183715 (Datanode Uuid 37f85710-5889-41a5-be8b-09b38bcc6c26) service to localhost/127.0.0.1:45175
2024-11-07T12:56:25,695 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:56:25,695 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:56:25,696 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/data/data3/current/BP-700663152-172.17.0.2-1730984183715 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:56:25,696 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/data/data4/current/BP-700663152-172.17.0.2-1730984183715 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:56:25,696 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:56:25,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@259cffcc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:56:25,698 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41f14207{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:56:25,698 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:56:25,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@333ec6ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:56:25,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79ca80d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/hadoop.log.dir/,STOPPED}
2024-11-07T12:56:25,700 WARN [BP-700663152-172.17.0.2-1730984183715 heartbeating to localhost/127.0.0.1:45175 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:56:25,700 WARN [BP-700663152-172.17.0.2-1730984183715 heartbeating to localhost/127.0.0.1:45175 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-700663152-172.17.0.2-1730984183715 (Datanode Uuid 2ad52c2e-7173-458e-8ebf-166a42701a32) service to localhost/127.0.0.1:45175
2024-11-07T12:56:25,700 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:56:25,700 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:56:25,700 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/data/data1/current/BP-700663152-172.17.0.2-1730984183715 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:56:25,701 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/cluster_978c7eb0-8174-9421-abe7-9e1b4279a1c9/data/data2/current/BP-700663152-172.17.0.2-1730984183715 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:56:25,701 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:56:25,706 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d974bb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-07T12:56:25,707 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1cb0de45{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:56:25,707 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:56:25,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b4c94a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:56:25,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c411d10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c8f7899-575f-f508-7acd-bfff31ad4791/hadoop.log.dir/,STOPPED}
2024-11-07T12:56:25,713 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-07T12:56:25,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-07T12:56:25,739 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 227)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45175 from jenkins
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45175 from jenkins
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-20
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
  app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
  app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45175
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45175 from jenkins.hfs.7
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:45175
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45175
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:45175
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-6
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
  java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45175
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=536 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=312 (was 322), ProcessCount=11 (was 11), AvailableMemoryMB=7634 (was 7647)